author     Vishvananda Ishaya <vishvananda@gmail.com>  2011-04-05 12:55:57 -0700
committer  Vishvananda Ishaya <vishvananda@gmail.com>  2011-04-05 12:55:57 -0700
commit     b66535602eae6b2f91cc5573798cd837e63f8ecc (patch)
tree       ecec28580523826f8315c226af32e4880478e7cf
parent     e0ba72946011b67a218e3c619b3105529bb43e53 (diff)
parent     94ccd2f4a1c42a8574fe65972650428130ae850d (diff)
download   nova-b66535602eae6b2f91cc5573798cd837e63f8ecc.tar.gz
           nova-b66535602eae6b2f91cc5573798cd837e63f8ecc.tar.xz
           nova-b66535602eae6b2f91cc5573798cd837e63f8ecc.zip
merged trunk
-rw-r--r--  .bzrignore | 6
-rw-r--r--  .mailmap | 4
-rw-r--r--  Authors | 15
-rw-r--r--  MANIFEST.in | 3
-rwxr-xr-x  bin/nova-ajax-console-proxy | 37
-rwxr-xr-x  bin/nova-api | 50
-rwxr-xr-x  bin/nova-dhcpbridge | 6
-rwxr-xr-x  bin/nova-direct-api | 37
-rwxr-xr-x  bin/nova-instancemonitor | 2
-rwxr-xr-x  bin/nova-manage | 419
-rwxr-xr-x  bin/nova-objectstore | 15
-rwxr-xr-x  bin/nova-vncproxy | 101
-rwxr-xr-x  bin/stack | 14
-rw-r--r--  contrib/boto_v6/ec2/connection.py | 100
-rwxr-xr-x  contrib/nova.sh | 40
-rw-r--r--  doc/.autogenerated | 283
-rw-r--r--  doc/build/html/.buildinfo | 4
-rw-r--r--  doc/ext/nova_autodoc.py | 3
-rw-r--r--  doc/source/_static/tweaks.css | 147
-rw-r--r--  doc/source/_theme/layout.html | 11
-rw-r--r--  doc/source/adminguide/distros/others.rst | 88
-rw-r--r--  doc/source/adminguide/distros/ubuntu.10.04.rst | 40
-rw-r--r--  doc/source/adminguide/distros/ubuntu.10.10.rst | 41
-rw-r--r--  doc/source/adminguide/flags.rst | 23
-rw-r--r--  doc/source/adminguide/multi.node.install.rst | 392
-rw-r--r--  doc/source/adminguide/single.node.install.rst | 362
-rw-r--r--  doc/source/api/autoindex.rst | 144
-rw-r--r--  doc/source/api/nova..adminclient.rst | 6
-rw-r--r--  doc/source/api/nova..api.direct.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.admin.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.apirequest.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.cloud.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.metadatarequesthandler.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.auth.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.backup_schedules.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.common.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.consoles.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.faults.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.flavors.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.images.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.servers.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.shared_ip_groups.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.zones.rst | 6
-rw-r--r--  doc/source/api/nova..auth.dbdriver.rst | 6
-rw-r--r--  doc/source/api/nova..auth.fakeldap.rst | 6
-rw-r--r--  doc/source/api/nova..auth.ldapdriver.rst | 6
-rw-r--r--  doc/source/api/nova..auth.manager.rst | 6
-rw-r--r--  doc/source/api/nova..auth.signer.rst | 6
-rw-r--r--  doc/source/api/nova..cloudpipe.pipelib.rst | 6
-rw-r--r--  doc/source/api/nova..compute.api.rst | 6
-rw-r--r--  doc/source/api/nova..compute.instance_types.rst | 6
-rw-r--r--  doc/source/api/nova..compute.manager.rst | 6
-rw-r--r--  doc/source/api/nova..compute.monitor.rst | 6
-rw-r--r--  doc/source/api/nova..compute.power_state.rst | 6
-rw-r--r--  doc/source/api/nova..console.api.rst | 6
-rw-r--r--  doc/source/api/nova..console.fake.rst | 6
-rw-r--r--  doc/source/api/nova..console.manager.rst | 6
-rw-r--r--  doc/source/api/nova..console.xvp.rst | 6
-rw-r--r--  doc/source/api/nova..context.rst | 6
-rw-r--r--  doc/source/api/nova..crypto.rst | 6
-rw-r--r--  doc/source/api/nova..db.api.rst | 6
-rw-r--r--  doc/source/api/nova..db.base.rst | 6
-rw-r--r--  doc/source/api/nova..db.migration.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.api.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migration.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.models.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.session.rst | 6
-rw-r--r--  doc/source/api/nova..exception.rst | 6
-rw-r--r--  doc/source/api/nova..fakememcache.rst | 6
-rw-r--r--  doc/source/api/nova..fakerabbit.rst | 6
-rw-r--r--  doc/source/api/nova..flags.rst | 6
-rw-r--r--  doc/source/api/nova..image.glance.rst | 6
-rw-r--r--  doc/source/api/nova..image.local.rst | 6
-rw-r--r--  doc/source/api/nova..image.s3.rst | 6
-rw-r--r--  doc/source/api/nova..image.service.rst | 6
-rw-r--r--  doc/source/api/nova..log.rst | 6
-rw-r--r--  doc/source/api/nova..manager.rst | 6
-rw-r--r--  doc/source/api/nova..network.api.rst | 6
-rw-r--r--  doc/source/api/nova..network.linux_net.rst | 6
-rw-r--r--  doc/source/api/nova..network.manager.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.bucket.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.handler.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.image.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.stored.rst | 6
-rw-r--r--  doc/source/api/nova..quota.rst | 6
-rw-r--r--  doc/source/api/nova..rpc.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.chance.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.driver.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.manager.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.simple.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.zone.rst | 6
-rw-r--r--  doc/source/api/nova..service.rst | 6
-rw-r--r--  doc/source/api/nova..test.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.fakes.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_adminapi.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_api.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_auth.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_common.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_faults.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_flavors.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_images.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_servers.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_zones.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.test_wsgi.rst | 6
-rw-r--r--  doc/source/api/nova..tests.db.fakes.rst | 6
-rw-r--r--  doc/source/api/nova..tests.declare_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.fake_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.glance.stubs.rst | 6
-rw-r--r--  doc/source/api/nova..tests.hyperv_unittest.rst | 6
-rw-r--r--  doc/source/api/nova..tests.objectstore_unittest.rst | 6
-rw-r--r--  doc/source/api/nova..tests.real_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.runtime_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_access.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_api.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_auth.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_cloud.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_compute.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_console.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_direct.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_instance_types.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_localization.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_log.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_middleware.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_misc.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_network.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_quota.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_rpc.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_scheduler.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_service.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_test.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_twistd.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_utils.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_virt.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_volume.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_xenapi.rst | 6
-rw-r--r--  doc/source/api/nova..tests.xenapi.stubs.rst | 6
-rw-r--r--  doc/source/api/nova..twistd.rst | 6
-rw-r--r--  doc/source/api/nova..utils.rst | 6
-rw-r--r--  doc/source/api/nova..version.rst | 6
-rw-r--r--  doc/source/api/nova..virt.connection.rst | 6
-rw-r--r--  doc/source/api/nova..virt.disk.rst | 6
-rw-r--r--  doc/source/api/nova..virt.fake.rst | 6
-rw-r--r--  doc/source/api/nova..virt.hyperv.rst | 6
-rw-r--r--  doc/source/api/nova..virt.images.rst | 6
-rw-r--r--  doc/source/api/nova..virt.libvirt_conn.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.fake.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.network_utils.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.vm_utils.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.vmops.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.volume_utils.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.volumeops.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi_conn.rst | 6
-rw-r--r--  doc/source/api/nova..volume.api.rst | 6
-rw-r--r--  doc/source/api/nova..volume.driver.rst | 6
-rw-r--r--  doc/source/api/nova..volume.manager.rst | 6
-rw-r--r--  doc/source/api/nova..volume.san.rst | 6
-rw-r--r--  doc/source/api/nova..wsgi.rst | 6
-rw-r--r--  doc/source/community.rst | 12
-rw-r--r--  doc/source/images/vmwareapi_blockdiagram.jpg | bin 0 -> 75363 bytes
-rw-r--r--  doc/source/index.rst | 20
-rw-r--r--  doc/source/man/novamanage.rst | 62
-rw-r--r--  doc/source/nova.concepts.rst | 5
-rw-r--r--  doc/source/object.model.rst | 14
-rw-r--r--  doc/source/quickstart.rst | 2
-rw-r--r--  doc/source/runnova/binaries.rst (renamed from doc/source/adminguide/binaries.rst) | 4
-rw-r--r--  doc/source/runnova/euca2ools.rst (renamed from doc/source/adminguide/euca2ools.rst) | 0
-rw-r--r--  doc/source/runnova/flags.rst | 193
-rw-r--r--  doc/source/runnova/getting.started.rst (renamed from doc/source/adminguide/getting.started.rst) | 15
-rw-r--r--  doc/source/runnova/index.rst (renamed from doc/source/adminguide/index.rst) | 15
-rw-r--r--  doc/source/runnova/managing.images.rst (renamed from doc/source/adminguide/managing.images.rst) | 0
-rw-r--r--  doc/source/runnova/managing.instance.types.rst | 84
-rw-r--r--  doc/source/runnova/managing.instances.rst (renamed from doc/source/adminguide/managing.instances.rst) | 0
-rw-r--r--  doc/source/runnova/managing.networks.rst (renamed from doc/source/adminguide/managing.networks.rst) | 0
-rw-r--r--  doc/source/runnova/managing.projects.rst (renamed from doc/source/adminguide/managing.projects.rst) | 0
-rw-r--r--  doc/source/runnova/managing.users.rst (renamed from doc/source/adminguide/managing.users.rst) | 0
-rw-r--r--  doc/source/runnova/managingsecurity.rst (renamed from doc/source/adminguide/managingsecurity.rst) | 0
-rw-r--r--  doc/source/runnova/monitoring.rst (renamed from doc/source/adminguide/monitoring.rst) | 0
-rw-r--r--  doc/source/runnova/network.flat.rst (renamed from doc/source/adminguide/network.flat.rst) | 0
-rw-r--r--  doc/source/runnova/network.vlan.rst (renamed from doc/source/adminguide/network.vlan.rst) | 0
-rw-r--r--  doc/source/runnova/nova.manage.rst (renamed from doc/source/adminguide/nova.manage.rst) | 23
-rw-r--r--  doc/source/runnova/vncconsole.rst | 76
-rw-r--r--  doc/source/vmwareapi_readme.rst | 218
-rw-r--r--  etc/api-paste.ini (renamed from etc/nova-api.conf) | 24
-rw-r--r--  nova/CA/.gitignore (renamed from CA/.gitignore) | 0
-rwxr-xr-x  nova/CA/geninter.sh (renamed from CA/geninter.sh) | 2
-rwxr-xr-x  nova/CA/genrootca.sh (renamed from CA/genrootca.sh) | 3
-rwxr-xr-x  nova/CA/genvpn.sh (renamed from CA/genvpn.sh) | 0
-rw-r--r--  nova/CA/newcerts/.placeholder (renamed from CA/newcerts/.placeholder) | 0
-rw-r--r--  nova/CA/openssl.cnf.tmpl (renamed from CA/openssl.cnf.tmpl) | 0
-rw-r--r--  nova/CA/private/.placeholder (renamed from CA/private/.placeholder) | 0
-rw-r--r--  nova/CA/projects/.gitignore (renamed from CA/projects/.gitignore) | 0
-rw-r--r--  nova/CA/projects/.placeholder (renamed from CA/projects/.placeholder) | 0
-rw-r--r--  nova/CA/reqs/.gitignore (renamed from CA/reqs/.gitignore) | 0
-rw-r--r--  nova/CA/reqs/.placeholder (renamed from CA/reqs/.placeholder) | 0
-rw-r--r--  nova/adminclient.py | 476
-rw-r--r--  nova/api/direct.py | 54
-rw-r--r--  nova/api/ec2/__init__.py | 39
-rw-r--r--  nova/api/ec2/admin.py | 30
-rw-r--r--  nova/api/ec2/apirequest.py | 18
-rw-r--r--  nova/api/ec2/cloud.py | 241
-rw-r--r--  nova/api/ec2/ec2utils.py | 32
-rw-r--r--  nova/api/ec2/metadatarequesthandler.py | 2
-rw-r--r--  nova/api/openstack/__init__.py | 104
-rw-r--r--  nova/api/openstack/accounts.py | 86
-rw-r--r--  nova/api/openstack/auth.py | 44
-rw-r--r--  nova/api/openstack/backup_schedules.py | 6
-rw-r--r--  nova/api/openstack/common.py | 73
-rw-r--r--  nova/api/openstack/consoles.py | 2
-rw-r--r--  nova/api/openstack/contrib/__init__.py | 22
-rw-r--r--  nova/api/openstack/contrib/volumes.py | 336
-rw-r--r--  nova/api/openstack/extensions.py | 450
-rw-r--r--  nova/api/openstack/faults.py | 47
-rw-r--r--  nova/api/openstack/flavors.py | 61
-rw-r--r--  nova/api/openstack/image_metadata.py | 93
-rw-r--r--  nova/api/openstack/images.py | 223
-rw-r--r--  nova/api/openstack/limits.py | 358
-rw-r--r--  nova/api/openstack/ratelimiting/__init__.py | 4
-rw-r--r--  nova/api/openstack/server_metadata.py | 78
-rw-r--r--  nova/api/openstack/servers.py | 574
-rw-r--r--  nova/api/openstack/shared_ip_groups.py | 6
-rw-r--r--  nova/api/openstack/users.py | 104
-rw-r--r--  nova/api/openstack/versions.py | 60
-rw-r--r--  nova/api/openstack/views/__init__.py | 0
-rw-r--r--  nova/api/openstack/views/addresses.py | 42
-rw-r--r--  nova/api/openstack/views/flavors.py | 96
-rw-r--r--  nova/api/openstack/views/images.py | 114
-rw-r--r--  nova/api/openstack/views/servers.py | 168
-rw-r--r--  nova/api/openstack/views/versions.py | 59
-rw-r--r--  nova/api/openstack/zones.py | 44
-rw-r--r--  nova/auth/dbdriver.py | 2
-rw-r--r--  nova/auth/fakeldap.py | 10
-rw-r--r--  nova/auth/ldapdriver.py | 4
-rw-r--r--  nova/auth/manager.py | 13
-rw-r--r--  nova/compute/api.py | 230
-rw-r--r--  nova/compute/instance_types.py | 123
-rw-r--r--  nova/compute/manager.py | 559
-rw-r--r--  nova/compute/power_state.py | 18
-rw-r--r--  nova/console/manager.py | 2
-rw-r--r--  nova/console/vmrc.py | 144
-rw-r--r--  nova/console/vmrc_manager.py | 158
-rw-r--r--  nova/console/xvp.py | 14
-rw-r--r--  nova/crypto.py | 46
-rw-r--r--  nova/db/api.py | 182
-rw-r--r--  nova/db/base.py | 2
-rw-r--r--  nova/db/sqlalchemy/api.py | 582
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py | 90
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py | 87
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py | 61
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py | 51
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py | 83
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py | 154
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py | 50
-rw-r--r--  nova/db/sqlalchemy/migration.py | 2
-rw-r--r--  nova/db/sqlalchemy/models.py | 83
-rw-r--r--  nova/exception.py | 6
-rw-r--r--  nova/fakerabbit.py | 1
-rw-r--r--  nova/flags.py | 16
-rw-r--r--  nova/image/fake.py | 113
-rw-r--r--  nova/image/glance.py | 189
-rw-r--r--  nova/image/local.py | 128
-rw-r--r--  nova/image/s3.py | 291
-rw-r--r--  nova/image/service.py | 95
-rw-r--r--  nova/log.py | 5
-rw-r--r--  nova/manager.py | 31
-rw-r--r--  nova/network/api.py | 16
-rw-r--r--  nova/network/linux_net.py | 620
-rw-r--r--  nova/network/manager.py | 177
-rw-r--r--  nova/network/vmwareapi_net.py | 91
-rw-r--r--  nova/network/xenapi_net.py | 85
-rw-r--r--  nova/objectstore/bucket.py | 181
-rw-r--r--  nova/objectstore/handler.py | 478
-rw-r--r--  nova/objectstore/image.py | 288
-rw-r--r--  nova/objectstore/s3server.py | 335
-rw-r--r--  nova/objectstore/stored.py | 63
-rw-r--r--  nova/quota.py | 22
-rw-r--r--  nova/rpc.py | 111
-rw-r--r--  nova/scheduler/api.py | 241
-rw-r--r--  nova/scheduler/chance.py | 4
-rw-r--r--  nova/scheduler/driver.py | 244
-rw-r--r--  nova/scheduler/manager.py | 74
-rw-r--r--  nova/scheduler/simple.py | 12
-rw-r--r--  nova/scheduler/zone.py | 5
-rw-r--r--  nova/scheduler/zone_manager.py | 176
-rw-r--r--  nova/service.py | 103
-rw-r--r--  nova/test.py | 93
-rw-r--r--  nova/tests/api/openstack/__init__.py | 2
-rw-r--r--  nova/tests/api/openstack/common.py | 36
-rw-r--r--  nova/tests/api/openstack/extensions/__init__.py | 15
-rw-r--r--  nova/tests/api/openstack/extensions/foxinsocks.py | 98
-rw-r--r--  nova/tests/api/openstack/fakes.py | 223
-rw-r--r--  nova/tests/api/openstack/test_accounts.py | 123
-rw-r--r--  nova/tests/api/openstack/test_adminapi.py | 3
-rw-r--r--  nova/tests/api/openstack/test_auth.py | 88
-rw-r--r--  nova/tests/api/openstack/test_common.py | 25
-rw-r--r--  nova/tests/api/openstack/test_extensions.py | 236
-rw-r--r--  nova/tests/api/openstack/test_faults.py | 122
-rw-r--r--  nova/tests/api/openstack/test_flavors.py | 229
-rw-r--r--  nova/tests/api/openstack/test_image_metadata.py | 166
-rw-r--r--  nova/tests/api/openstack/test_images.py | 731
-rw-r--r--  nova/tests/api/openstack/test_limits.py | 584
-rw-r--r--  nova/tests/api/openstack/test_ratelimiting.py | 243
-rw-r--r--  nova/tests/api/openstack/test_server_metadata.py | 164
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 1199
-rw-r--r--  nova/tests/api/openstack/test_shared_ip_groups.py | 30
-rw-r--r--  nova/tests/api/openstack/test_users.py | 159
-rw-r--r--  nova/tests/api/openstack/test_versions.py | 123
-rw-r--r--  nova/tests/api/openstack/test_zones.py | 90
-rw-r--r--  nova/tests/api/test_wsgi.py | 209
-rw-r--r--  nova/tests/db/fakes.py | 135
-rw-r--r--  nova/tests/fake_flags.py | 1
-rw-r--r--  nova/tests/fake_utils.py | 109
-rw-r--r--  nova/tests/glance/stubs.py | 45
-rw-r--r--  nova/tests/hyperv_unittest.py | 2
-rw-r--r--  nova/tests/image/__init__.py | 16
-rw-r--r--  nova/tests/image/test_glance.py | 236
-rw-r--r--  nova/tests/integrated/__init__.py | 20
-rw-r--r--  nova/tests/integrated/api/__init__.py | 20
-rw-r--r--  nova/tests/integrated/api/client.py | 244
-rw-r--r--  nova/tests/integrated/integrated_helpers.py | 221
-rw-r--r--  nova/tests/integrated/test_extensions.py | 44
-rw-r--r--  nova/tests/integrated/test_login.py | 68
-rw-r--r--  nova/tests/integrated/test_servers.py | 184
-rw-r--r--  nova/tests/integrated/test_volumes.py | 295
-rw-r--r--  nova/tests/network/__init__.py | 67
-rw-r--r--  nova/tests/network/base.py | 154
-rw-r--r--  nova/tests/objectstore_unittest.py | 315
-rw-r--r--  nova/tests/test_api.py | 14
-rw-r--r--  nova/tests/test_auth.py | 15
-rw-r--r--  nova/tests/test_cloud.py | 112
-rw-r--r--  nova/tests/test_compute.py | 405
-rw-r--r--  nova/tests/test_console.py | 2
-rw-r--r--  nova/tests/test_direct.py | 31
-rw-r--r--  nova/tests/test_flat_network.py | 161
-rw-r--r--  nova/tests/test_instance_types.py | 86
-rw-r--r--  nova/tests/test_localization.py | 3
-rw-r--r--  nova/tests/test_middleware.py | 4
-rw-r--r--  nova/tests/test_misc.py | 93
-rw-r--r--  nova/tests/test_network.py | 483
-rw-r--r--  nova/tests/test_objectstore.py | 148
-rw-r--r--  nova/tests/test_quota.py | 119
-rw-r--r--  nova/tests/test_rpc.py | 4
-rw-r--r--  nova/tests/test_scheduler.py | 788
-rw-r--r--  nova/tests/test_service.py | 65
-rw-r--r--  nova/tests/test_test.py | 2
-rw-r--r--  nova/tests/test_utils.py | 78
-rw-r--r--  nova/tests/test_virt.py | 416
-rw-r--r--  nova/tests/test_vlan_network.py | 242
-rw-r--r--  nova/tests/test_vmwareapi.py | 252
-rw-r--r--  nova/tests/test_volume.py | 198
-rw-r--r--  nova/tests/test_xenapi.py | 435
-rw-r--r--  nova/tests/test_zones.py | 206
-rw-r--r--  nova/tests/vmwareapi/__init__.py | 21
-rw-r--r--  nova/tests/vmwareapi/db_fakes.py | 109
-rw-r--r--  nova/tests/vmwareapi/stubs.py | 46
-rw-r--r--  nova/tests/xenapi/stubs.py | 104
-rw-r--r--  nova/utils.py | 280
-rw-r--r--  nova/virt/connection.py | 9
-rw-r--r--  nova/virt/cpuinfo.xml.template | 9
-rw-r--r--  nova/virt/disk.py | 109
-rw-r--r--  nova/virt/driver.py | 243
-rw-r--r--  nova/virt/fake.py | 114
-rw-r--r--  nova/virt/hyperv.py | 26
-rw-r--r--  nova/virt/images.py | 30
-rw-r--r--  nova/virt/interfaces.template | 23
-rw-r--r--  nova/virt/libvirt.xml.template | 55
-rw-r--r--  nova/virt/libvirt_conn.py | 1168
-rw-r--r--  nova/virt/vmwareapi/__init__.py | 19
-rw-r--r--  nova/virt/vmwareapi/error_util.py | 96
-rw-r--r--  nova/virt/vmwareapi/fake.py | 711
-rw-r--r--  nova/virt/vmwareapi/io_util.py | 168
-rw-r--r--  nova/virt/vmwareapi/network_utils.py | 149
-rw-r--r--  nova/virt/vmwareapi/read_write_util.py | 182
-rw-r--r--  nova/virt/vmwareapi/vim.py | 180
-rw-r--r--  nova/virt/vmwareapi/vim_util.py | 217
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 306
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 789
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py | 201
-rw-r--r--  nova/virt/vmwareapi_conn.py | 376
-rw-r--r--  nova/virt/xenapi/fake.py | 212
-rw-r--r--  nova/virt/xenapi/network_utils.py | 19
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 722
-rw-r--r--  nova/virt/xenapi/vmops.py | 901
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 6
-rw-r--r--  nova/virt/xenapi/volumeops.py | 2
-rw-r--r--  nova/virt/xenapi_conn.py | 94
-rw-r--r--  nova/vnc/__init__.py | 34
-rw-r--r--  nova/vnc/auth.py | 138
-rw-r--r--  nova/vnc/proxy.py | 131
-rw-r--r--  nova/volume/api.py | 3
-rw-r--r--  nova/volume/driver.py | 254
-rw-r--r--  nova/volume/manager.py | 13
-rw-r--r--  nova/wsgi.py | 134
-rwxr-xr-x  plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py | 93
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/agent | 83
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 405
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 118
-rw-r--r--  po/nova.pot | 10
-rw-r--r--  pylintrc | 13
-rw-r--r--  run_tests.py | 2
-rwxr-xr-x  run_tests.sh | 2
-rw-r--r--  setup.py | 1
-rw-r--r--  smoketests/base.py | 55
-rwxr-xr-x  smoketests/proxy.sh | 11
-rw-r--r--  smoketests/public_network_smoketests.py | 6
-rw-r--r--  smoketests/run_tests.py | 310
-rw-r--r--  smoketests/test_admin.py (renamed from smoketests/admin_smoketests.py) | 9
-rw-r--r--  smoketests/test_netadmin.py (renamed from smoketests/netadmin_smoketests.py) | 17
-rw-r--r--  smoketests/test_sysadmin.py (renamed from smoketests/sysadmin_smoketests.py) | 51
-rw-r--r--  tools/esx/guest_tool.py | 345
-rwxr-xr-x  tools/euca-get-ajax-console | 12
-rw-r--r--  tools/pip-requires | 3
422 files changed, 30150 insertions, 6430 deletions
diff --git a/.bzrignore b/.bzrignore
index b271561a3..b751ad825 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -13,3 +13,9 @@ CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
nova/vcsversion.py
+*.DS_Store
+.project
+.pydevproject
+clean.sqlite
+run_tests.log
+tests.sqlite
diff --git a/.mailmap b/.mailmap
index a839eba6c..ccf2109a7 100644
--- a/.mailmap
+++ b/.mailmap
@@ -15,10 +15,12 @@
<corywright@gmail.com> <cory.wright@rackspace.com>
<devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver>
+<itoumsn@nttdata.co.jp> <itoumsn@shayol>
<jaypipes@gmail.com> <jpipes@serialcoder>
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
+<josh@jk0.org> <josh.kearney@rackspace.com>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<justin@fathomdb.com> <superstack@superstack.org>
<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
@@ -26,6 +28,7 @@
<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
<matt.dietz@rackspace.com> <mdietz@openstack>
<mordred@inaugust.com> <mordred@hudson>
+<nirmal.ranganathan@rackspace.com> <nirmal.ranganathan@rackspace.coom>
<paul@openstack.org> <paul.voccio@rackspace.com>
<paul@openstack.org> <pvoccio@castor.local>
<rconradharris@gmail.com> <rick.harris@rackspace.com>
@@ -40,4 +43,5 @@
<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
+<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
diff --git a/Authors b/Authors
index 494e614a0..48b912184 100644
--- a/Authors
+++ b/Authors
@@ -1,4 +1,5 @@
Andy Smith <code@term.ie>
+Andy Southgate <andy.southgate@citrix.com>
Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
@@ -11,6 +12,7 @@ Chiradeep Vittal <chiradeep@cloud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Behrens <cbehrens@codestud.com>
Christian Berendt <berendt@b1-systems.de>
+Chuck Short <zulcss@ubuntu.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dan.prince@rackspace.com>
David Pravec <David.Pravec@danix.org>
@@ -19,7 +21,9 @@ Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
+Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
+Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
@@ -28,16 +32,21 @@ Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com>
John Dewey <john@dewey.ws>
+John Tran <jtran@attinteractive.com>
Jonathan Bryce <jbryce@jbryce.com>
Jordan Rinke <jordan@openstack.org>
Josh Durgin <joshd@hq.newdream.net>
-Josh Kearney <josh.kearney@rackspace.com>
+Josh Kearney <josh@jk0.org>
+Josh Kleinpeter <josh@kleinpeter.org>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
+Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
+Mark Washenberger <mark.washenberger@rackspace.com>
+Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
@@ -45,7 +54,8 @@ Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
-Naveed Massjouni <naveed.massjouni@rackspace.com>
+Naveed Massjouni <naveedm9@gmail.com>
+Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>
@@ -55,6 +65,7 @@ Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
+Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com>
Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>
diff --git a/MANIFEST.in b/MANIFEST.in
index 2ceed34f3..e7a6e7da4 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,7 @@
include HACKING LICENSE run_tests.py run_tests.sh
include README builddeb.sh exercise_rsapi.py
include ChangeLog MANIFEST.in pylintrc Authors
-graft CA
+graft nova/CA
graft doc
graft smoketests
graft tools
@@ -25,6 +25,7 @@ include nova/db/sqlalchemy/migrate_repo/migrate.cfg
include nova/db/sqlalchemy/migrate_repo/README
include nova/virt/interfaces.template
include nova/virt/libvirt*.xml.template
+include nova/virt/cpuinfo.xml.template
include nova/tests/CA/
include nova/tests/CA/cacert.pem
include nova/tests/CA/private/
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index 1e11c6d58..d88f59e40 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -63,10 +63,16 @@ class AjaxConsoleProxy(object):
def __call__(self, env, start_response):
try:
- req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
- env['HTTP_HOST'],
- env['PATH_INFO'],
- env['QUERY_STRING'])
+ if 'QUERY_STRING' in env:
+ req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
+ env['HTTP_HOST'],
+ env['PATH_INFO'],
+ env['QUERY_STRING'])
+ else:
+ req_url = '%s://%s%s' % (env['wsgi.url_scheme'],
+ env['HTTP_HOST'],
+ env['PATH_INFO'])
+
if 'HTTP_REFERER' in env:
auth_url = env['HTTP_REFERER']
else:
@@ -102,17 +108,17 @@ class AjaxConsoleProxy(object):
return "Server Error"
def register_listeners(self):
- class Callback:
- def __call__(self, data, message):
- if data['method'] == 'authorize_ajax_console':
- AjaxConsoleProxy.tokens[data['args']['token']] = \
- {'args': data['args'], 'last_activity': time.time()}
+ class TopicProxy():
+ @staticmethod
+ def authorize_ajax_console(context, **kwargs):
+ AjaxConsoleProxy.tokens[kwargs['token']] = \
+ {'args': kwargs, 'last_activity': time.time()}
conn = rpc.Connection.instance(new=True)
- consumer = rpc.TopicConsumer(
- connection=conn,
- topic=FLAGS.ajax_console_proxy_topic)
- consumer.register_callback(Callback())
+ consumer = rpc.TopicAdapterConsumer(
+ connection=conn,
+ proxy=TopicProxy,
+ topic=FLAGS.ajax_console_proxy_topic)
def delete_expired_tokens():
now = time.time()
@@ -124,8 +130,7 @@ class AjaxConsoleProxy(object):
for k in to_delete:
del AjaxConsoleProxy.tokens[k]
- utils.LoopingCall(consumer.fetch, auto_ack=True,
- enable_callbacks=True).start(0.1)
+ utils.LoopingCall(consumer.fetch, enable_callbacks=True).start(0.1)
utils.LoopingCall(delete_expired_tokens).start(1)
if __name__ == '__main__':
diff --git a/bin/nova-api b/bin/nova-api
index cf140570a..a1088c23d 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -36,50 +36,18 @@ gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
+from nova import service
+from nova import utils
from nova import version
from nova import wsgi
+
LOG = logging.getLogger('nova.api')
FLAGS = flags.FLAGS
-flags.DEFINE_string('ec2_listen', "0.0.0.0",
- 'IP address for EC2 API to listen')
-flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
-flags.DEFINE_string('osapi_listen', "0.0.0.0",
- 'IP address for OpenStack API to listen')
-flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
-flags.DEFINE_flag(flags.HelpFlag())
-flags.DEFINE_flag(flags.HelpshortFlag())
-flags.DEFINE_flag(flags.HelpXMLFlag())
-
-API_ENDPOINTS = ['ec2', 'osapi']
-
-
-def run_app(paste_config_file):
- LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file)
- apps = []
- for api in API_ENDPOINTS:
- config = wsgi.load_paste_configuration(paste_config_file, api)
- if config is None:
- LOG.debug(_("No paste configuration for app: %s"), api)
- continue
- LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
- LOG.info(_("Running %s API"), api)
- app = wsgi.load_paste_app(paste_config_file, api)
- apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
- getattr(FLAGS, "%s_listen" % api)))
- if len(apps) == 0:
- LOG.error(_("No known API applications configured in %s."),
- paste_config_file)
- return
-
- server = wsgi.Server()
- for app in apps:
- server.start(*app)
- server.wait()
-
if __name__ == '__main__':
+ utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
LOG.audit(_("Starting nova-api node (version %s)"),
@@ -88,8 +56,6 @@ if __name__ == '__main__':
for flag in FLAGS:
flag_get = FLAGS.get(flag, None)
LOG.debug("%(flag)s : %(flag_get)s" % locals())
- conf = wsgi.paste_config_file('nova-api.conf')
- if conf:
- run_app(conf)
- else:
- LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf')
+
+ service = service.serve_wsgi(service.ApiService)
+ service.wait()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 3dd9de367..f42dfd6b5 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -48,6 +48,7 @@ flags.DECLARE('auth_driver', 'nova.auth.manager')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
+flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface')
LOG = logging.getLogger('nova.dhcpbridge')
@@ -94,7 +95,7 @@ def init_leases(interface):
"""Get the list of hosts for an interface."""
ctxt = context.get_admin_context()
network_ref = db.network_get_by_bridge(ctxt, interface)
- return linux_net.get_dhcp_hosts(ctxt, network_ref['id'])
+ return linux_net.get_dhcp_leases(ctxt, network_ref['id'])
def main():
@@ -103,7 +104,8 @@ def main():
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
logging.setup()
- interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
+ # check ENV first so we don't break any older deploys
+ interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
if int(os.environ.get('TESTING', '0')):
from nova.tests import fake_flags
action = argv[1]
diff --git a/bin/nova-direct-api b/bin/nova-direct-api
index bf29d9a5e..83ec72722 100755
--- a/bin/nova-direct-api
+++ b/bin/nova-direct-api
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -34,12 +34,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import compute
from nova import flags
from nova import log as logging
+from nova import network
from nova import utils
+from nova import volume
from nova import wsgi
from nova.api import direct
-from nova.compute import api as compute_api
FLAGS = flags.FLAGS
@@ -50,13 +52,42 @@ flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
+# An example of an API that only exposes read-only methods.
+# In this case we're just limiting which methods are exposed.
+class ReadOnlyCompute(direct.Limited):
+ """Read-only Compute API."""
+
+ _allowed = ['get', 'get_all', 'get_console_output']
+
+
+# An example of an API that provides a backwards compatibility layer.
+# In this case we're overwriting the implementation to ensure
+# compatibility with an older version. In reality we would want the
+# "description=None" to be part of the actual API so that code
+# like this isn't even necessary, but this example shows what one can
+# do if that isn't the situation.
+class VolumeVersionOne(direct.Limited):
+ _allowed = ['create', 'delete', 'update', 'get']
+
+ def create(self, context, size, name):
+ self.proxy.create(context, size, name, description=None)
+
+
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
- direct.register_service('compute', compute_api.API())
+ direct.register_service('compute', compute.API())
+ direct.register_service('volume', volume.API())
+ direct.register_service('network', network.API())
direct.register_service('reflect', direct.Reflection())
+
+ # Here is how we could expose the code in the examples above.
+ #direct.register_service('compute-readonly',
+ # ReadOnlyCompute(compute.API()))
+ #direct.register_service('volume-v1', VolumeVersionOne(volume.API()))
+
router = direct.Router()
with_json = direct.JsonParamsMiddleware(router)
with_req = direct.PostParamsMiddleware(with_json)
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index 24cc9fd23..b9d4e49d7 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -50,7 +50,7 @@ if __name__ == '__main__':
if __name__ == '__builtin__':
LOG.warn(_('Starting instance monitor'))
- # pylint: disable-msg=C0103
+ # pylint: disable=C0103
monitor = monitor.InstanceMonitor()
# This is the parent service that twistd will be looking for when it
diff --git a/bin/nova-manage b/bin/nova-manage
index b603c8b07..6789efba8 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -55,6 +55,8 @@
import datetime
import gettext
+import glob
+import json
import os
import re
import sys
@@ -81,9 +83,10 @@ from nova import log as logging
from nova import quota
from nova import rpc
from nova import utils
-from nova.api.ec2.cloud import ec2_id_to_id
+from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
+from nova.compute import instance_types
from nova.db import migration
FLAGS = flags.FLAGS
@@ -93,6 +96,8 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+flags.DECLARE('images_path', 'nova.image.local')
+flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -103,7 +108,7 @@ def param2id(object_id):
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
"""
if '-' in object_id:
- return ec2_id_to_id(object_id)
+ return ec2utils.ec2_id_to_id(object_id)
else:
return int(object_id)
@@ -272,7 +277,7 @@ def _db_error(caught_exception):
print caught_exception
print _("The above error may show that the database has not "
"been created.\nPlease create a database using "
- "nova-manage sync db before running this command.")
+ "'nova-manage db sync' before running this command.")
exit(1)
@@ -433,6 +438,8 @@ class ProjectCommands(object):
"been created.\nPlease create a database by running a "
"nova-api server on this host.")
+AccountCommands = ProjectCommands
+
class FixedIpCommands(object):
"""Class for managing fixed ip."""
@@ -440,10 +447,15 @@ class FixedIpCommands(object):
def list(self, host=None):
"""Lists all fixed ips (optionally by host) arguments: [host]"""
ctxt = context.get_admin_context()
- if host == None:
- fixed_ips = db.fixed_ip_get_all(ctxt)
- else:
- fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host)
+
+ try:
+ if host == None:
+ fixed_ips = db.fixed_ip_get_all(ctxt)
+ else:
+ fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host)
+ except exception.NotFound as ex:
+ print "error: %s" % ex
+ sys.exit(2)
print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'),
_('IP address'),
@@ -460,9 +472,9 @@ class FixedIpCommands(object):
host = instance['host']
mac_address = instance['mac_address']
print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
- fixed_ip['network']['cidr'],
- fixed_ip['address'],
- mac_address, hostname, host)
+ fixed_ip['network']['cidr'],
+ fixed_ip['address'],
+ mac_address, hostname, host)
class FloatingIpCommands(object):
@@ -507,11 +519,12 @@ class NetworkCommands(object):
network_size=None, vlan_start=None,
vpn_start=None, fixed_range_v6=None, label='public'):
"""Creates fixed ips for host by range
- arguments: [fixed_range=FLAG], [num_networks=FLAG],
+ arguments: fixed_range=FLAG, [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG],
[vpn_start=FLAG], [fixed_range_v6=FLAG]"""
if not fixed_range:
- fixed_range = FLAGS.fixed_range
+ raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is '
+ 'required to create networks.'))
if not num_networks:
num_networks = FLAGS.num_networks
if not network_size:
@@ -544,6 +557,51 @@ class NetworkCommands(object):
network.dhcp_start,
network.dns)
+ def delete(self, fixed_range):
+ """Deletes a network"""
+ network = db.network_get_by_cidr(context.get_admin_context(), \
+ fixed_range)
+ if network.project_id is not None:
+ raise ValueError(_('Network must be disassociated from project %s'
+ ' before delete' % network.project_id))
+ db.network_delete_safe(context.get_admin_context(), network.id)
+
+
+class VmCommands(object):
+ """Class for mangaging VM instances."""
+
+ def live_migration(self, ec2_id, dest):
+ """Migrates a running instance to a new machine.
+
+ :param ec2_id: instance id which comes from euca-describe-instance.
+ :param dest: destination host name.
+
+ """
+
+ ctxt = context.get_admin_context()
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
+
+ if (FLAGS.connection_type != 'libvirt' or
+ (FLAGS.connection_type == 'libvirt' and
+ FLAGS.libvirt_type not in ['kvm', 'qemu'])):
+ msg = _('Only KVM and QEmu are supported for now. Sorry!')
+ raise exception.Error(msg)
+
+ if (FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \
+ FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver'):
+ msg = _("Support only AOEDriver and ISCSIDriver. Sorry!")
+ raise exception.Error(msg)
+
+ rpc.call(ctxt,
+ FLAGS.scheduler_topic,
+ {"method": "live_migration",
+ "args": {"instance_id": instance_id,
+ "dest": dest,
+ "topic": FLAGS.compute_topic}})
+
+ print _('Migration of %s initiated.'
+ 'Check its progress using euca-describe-instances.') % ec2_id
+
class ServiceCommands(object):
"""Enable and disable running services"""
@@ -589,6 +647,59 @@ class ServiceCommands(object):
return
db.service_update(ctxt, svc['id'], {'disabled': True})
+ def describe_resource(self, host):
+ """Describes cpu/memory/hdd info for host.
+
+ :param host: hostname.
+
+ """
+
+ result = rpc.call(context.get_admin_context(),
+ FLAGS.scheduler_topic,
+ {"method": "show_host_resources",
+ "args": {"host": host}})
+
+ if type(result) != dict:
+ print _('An unexpected error has occurred.')
+ print _('[Result]'), result
+ else:
+ cpu = result['resource']['vcpus']
+ mem = result['resource']['memory_mb']
+ hdd = result['resource']['local_gb']
+ cpu_u = result['resource']['vcpus_used']
+ mem_u = result['resource']['memory_mb_used']
+ hdd_u = result['resource']['local_gb_used']
+
+ print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
+ print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
+ print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
+ for p_id, val in result['usage'].items():
+ print '%s\t\t%s\t\t%s\t%s\t%s' % (host,
+ p_id,
+ val['vcpus'],
+ val['memory_mb'],
+ val['local_gb'])
+
+ def update_resource(self, host):
+ """Updates available vcpu/memory/disk info for host.
+
+ :param host: hostname.
+
+ """
+
+ ctxt = context.get_admin_context()
+ service_refs = db.service_get_all_by_host(ctxt, host)
+ if len(service_refs) <= 0:
+ raise exception.Invalid(_('%s does not exist.') % host)
+
+ service_refs = [s for s in service_refs if s['topic'] == 'compute']
+ if len(service_refs) <= 0:
+ raise exception.Invalid(_('%s is not compute node.') % host)
+
+ rpc.call(ctxt,
+ db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+ {"method": "update_available_resource"})
+
class LogCommands(object):
def request(self, request_id, logfile='/var/log/nova.log'):
@@ -614,6 +725,49 @@ class DbCommands(object):
print migration.db_version()
+class InstanceCommands(object):
+ """Class for managing instances."""
+
+ def list(self, host=None, instance=None):
+ """Show a list of all instances"""
+ print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+ " %-10s %-10s %-10s %-5s" % (
+ _('instance'),
+ _('node'),
+ _('type'),
+ _('state'),
+ _('launched'),
+ _('image'),
+ _('kernel'),
+ _('ramdisk'),
+ _('project'),
+ _('user'),
+ _('zone'),
+ _('index'))
+
+ if host == None:
+ instances = db.instance_get_all(context.get_admin_context())
+ else:
+ instances = db.instance_get_all_by_host(
+ context.get_admin_context(), host)
+
+ for instance in instances:
+ print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+ " %-10s %-10s %-10s %-5d" % (
+ instance['hostname'],
+ instance['host'],
+ instance['instance_type'],
+ instance['state_description'],
+ instance['launched_at'],
+ instance['image_id'],
+ instance['kernel_id'],
+ instance['ramdisk_id'],
+ instance['project_id'],
+ instance['user_id'],
+ instance['availability_zone'],
+ instance['launch_index'])
+
+
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
@@ -661,8 +815,231 @@ class VolumeCommands(object):
"mountpoint": volume['mountpoint']}})
+class InstanceTypeCommands(object):
+ """Class for managing instance types / flavors."""
+
+ def _print_instance_types(self, n, val):
+ deleted = ('', ', inactive')[val["deleted"] == 1]
+ print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, "
+ "Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % (
+ n, val["memory_mb"], val["vcpus"], val["local_gb"],
+ val["flavorid"], val["swap"], val["rxtx_quota"],
+ val["rxtx_cap"], deleted)
+
+ def create(self, name, memory, vcpus, local_gb, flavorid,
+ swap=0, rxtx_quota=0, rxtx_cap=0):
+ """Creates instance types / flavors
+ arguments: name memory vcpus local_gb flavorid [swap] [rxtx_quota]
+ [rxtx_cap]
+ """
+ try:
+ instance_types.create(name, memory, vcpus, local_gb,
+ flavorid, swap, rxtx_quota, rxtx_cap)
+ except exception.InvalidInputException:
+ print "Must supply valid parameters to create instance type"
+ print e
+ sys.exit(1)
+ except exception.DBError, e:
+ print "DB Error: %s" % e
+ sys.exit(2)
+ except:
+ print "Unknown error"
+ sys.exit(3)
+ else:
+ print "%s created" % name
+
+ def delete(self, name, purge=None):
+ """Marks instance types / flavors as deleted
+ arguments: name"""
+ try:
+ if purge == "--purge":
+ instance_types.purge(name)
+ verb = "purged"
+ else:
+ instance_types.destroy(name)
+ verb = "deleted"
+ except exception.ApiError:
+ print "Valid instance type name is required"
+ sys.exit(1)
+ except exception.DBError, e:
+ print "DB Error: %s" % e
+ sys.exit(2)
+ except:
+ sys.exit(3)
+ else:
+ print "%s %s" % (name, verb)
+
+ def list(self, name=None):
+ """Lists all active or specific instance types / flavors
+ arguments: [name]"""
+ try:
+ if name == None:
+ inst_types = instance_types.get_all_types()
+ elif name == "--all":
+ inst_types = instance_types.get_all_types(True)
+ else:
+ inst_types = instance_types.get_instance_type(name)
+ except exception.DBError, e:
+ _db_error(e)
+ if isinstance(inst_types.values()[0], dict):
+ for k, v in inst_types.iteritems():
+ self._print_instance_types(k, v)
+ else:
+ self._print_instance_types(name, inst_types)
+
+
+class ImageCommands(object):
+ """Methods for dealing with a cloud in an odd state"""
+
+ def __init__(self, *args, **kwargs):
+ self.image_service = utils.import_object(FLAGS.image_service)
+
+ def _register(self, image_type, disk_format, container_format,
+ path, owner, name=None, is_public='T',
+ architecture='x86_64', kernel_id=None, ramdisk_id=None):
+ meta = {'is_public': True,
+ 'name': name,
+ 'disk_format': disk_format,
+ 'container_format': container_format,
+ 'properties': {'image_state': 'available',
+ 'owner_id': owner,
+ 'type': image_type,
+ 'architecture': architecture,
+ 'image_location': 'local',
+ 'is_public': (is_public == 'T')}}
+ print image_type, meta
+ if kernel_id:
+ meta['properties']['kernel_id'] = int(kernel_id)
+ if ramdisk_id:
+ meta['properties']['ramdisk_id'] = int(ramdisk_id)
+ elevated = context.get_admin_context()
+ try:
+ with open(path) as ifile:
+ image = self.image_service.create(elevated, meta, ifile)
+ new = image['id']
+ print _("Image registered to %(new)s (%(new)08x).") % locals()
+ return new
+ except Exception as exc:
+ print _("Failed to register %(path)s: %(exc)s") % locals()
+
+ def all_register(self, image, kernel, ramdisk, owner, name=None,
+ is_public='T', architecture='x86_64'):
+ """Uploads an image, kernel, and ramdisk into the image_service
+ arguments: image kernel ramdisk owner [name] [is_public='T']
+ [architecture='x86_64']"""
+ kernel_id = self.kernel_register(kernel, owner, None,
+ is_public, architecture)
+ ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
+ is_public, architecture)
+ self.image_register(image, owner, name, is_public,
+ architecture, kernel_id, ramdisk_id)
+
+ def image_register(self, path, owner, name=None, is_public='T',
+ architecture='x86_64', kernel_id=None, ramdisk_id=None,
+ disk_format='ami', container_format='ami'):
+ """Uploads an image into the image_service
+ arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+ [kernel_id=None] [ramdisk_id=None]
+ [disk_format='ami'] [container_format='ami']"""
+ return self._register('machine', disk_format, container_format, path,
+ owner, name, is_public, architecture,
+ kernel_id, ramdisk_id)
+
+ def kernel_register(self, path, owner, name=None, is_public='T',
+ architecture='x86_64'):
+ """Uploads a kernel into the image_service
+ arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+ """
+ return self._register('kernel', 'aki', 'aki', path, owner, name,
+ is_public, architecture)
+
+ def ramdisk_register(self, path, owner, name=None, is_public='T',
+ architecture='x86_64'):
+ """Uploads a ramdisk into the image_service
+ arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+ """
+ return self._register('ramdisk', 'ari', 'ari', path, owner, name,
+ is_public, architecture)
+
+ def _lookup(self, old_image_id):
+ try:
+ internal_id = ec2utils.ec2_id_to_id(old_image_id)
+ image = self.image_service.show(context, internal_id)
+ except exception.NotFound:
+ image = self.image_service.show_by_name(context, old_image_id)
+ return image['id']
+
+ def _old_to_new(self, old):
+ mapping = {'machine': 'ami',
+ 'kernel': 'aki',
+ 'ramdisk': 'ari'}
+ container_format = mapping[old['type']]
+ disk_format = container_format
+ new = {'disk_format': disk_format,
+ 'container_format': container_format,
+ 'is_public': True,
+ 'name': old['imageId'],
+ 'properties': {'image_state': old['imageState'],
+ 'owner_id': old['imageOwnerId'],
+ 'architecture': old['architecture'],
+ 'type': old['type'],
+ 'image_location': old['imageLocation'],
+ 'is_public': old['isPublic']}}
+ if old.get('kernelId'):
+ new['properties']['kernel_id'] = self._lookup(old['kernelId'])
+ if old.get('ramdiskId'):
+ new['properties']['ramdisk_id'] = self._lookup(old['ramdiskId'])
+ return new
+
+ def _convert_images(self, images):
+ elevated = context.get_admin_context()
+ for image_path, image_metadata in images.iteritems():
+ meta = self._old_to_new(image_metadata)
+ old = meta['name']
+ try:
+ with open(image_path) as ifile:
+ image = self.image_service.create(elevated, meta, ifile)
+ new = image['id']
+ print _("Image %(old)s converted to " \
+ "%(new)s (%(new)08x).") % locals()
+ except Exception as exc:
+ print _("Failed to convert %(old)s: %(exc)s") % locals()
+
+ def convert(self, directory):
+ """Uploads old objectstore images in directory to new service
+ arguments: directory"""
+ machine_images = {}
+ other_images = {}
+ directory = os.path.abspath(directory)
+ # NOTE(vish): If we're importing from the images path dir, attempt
+ # to move the files out of the way before importing
+ # so we aren't writing to the same directory. This
+ # may fail if the dir was a mointpoint.
+ if (FLAGS.image_service == 'nova.image.local.LocalImageService'
+ and directory == os.path.abspath(FLAGS.images_path)):
+ new_dir = "%s_bak" % directory
+ os.move(directory, new_dir)
+ os.mkdir(directory)
+ directory = new_dir
+ for fn in glob.glob("%s/*/info.json" % directory):
+ try:
+ image_path = os.path.join(fn.rpartition('/')[0], 'image')
+ with open(fn) as metadata_file:
+ image_metadata = json.load(metadata_file)
+ if image_metadata['type'] == 'machine':
+ machine_images[image_path] = image_metadata
+ else:
+ other_images[image_path] = image_metadata
+ except Exception as exc:
+ print _("Failed to load %(fn)s.") % locals()
+ # NOTE(vish): do kernels and ramdisks first so images
+ self._convert_images(other_images)
+ self._convert_images(machine_images)
+
+
CATEGORIES = [
('user', UserCommands),
+ ('account', AccountCommands),
('project', ProjectCommands),
('role', RoleCommands),
('shell', ShellCommands),
@@ -670,10 +1047,15 @@ CATEGORIES = [
('fixed', FixedIpCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
+ ('vm', VmCommands),
('service', ServiceCommands),
('log', LogCommands),
('db', DbCommands),
- ('volume', VolumeCommands)]
+ ('volume', VolumeCommands),
+ ('instance_type', InstanceTypeCommands),
+ ('image', ImageCommands),
+ ('flavor', InstanceTypeCommands),
+ ('instance', InstanceCommands)]
def lazy_match(name, key_value_tuples):
@@ -716,8 +1098,8 @@ def main():
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
- print "Available categories:"
- for k, _ in CATEGORIES:
+ print _("Available categories:")
+ for k, _v in CATEGORIES:
print "\t%s" % k
sys.exit(2)
category = argv.pop(0)
@@ -728,7 +1110,7 @@ def main():
actions = methods_of(command_object)
if len(argv) < 1:
print script_name + " category action [<args>]"
- print "Available actions for %s category:" % category
+ print _("Available actions for %s category:") % category
for k, _v in actions:
print "\t%s" % k
sys.exit(2)
@@ -740,9 +1122,12 @@ def main():
fn(*argv)
sys.exit(0)
except TypeError:
- print "Possible wrong number of arguments supplied"
+ print _("Possible wrong number of arguments supplied")
print "%s %s: %s" % (category, action, fn.__doc__)
raise
+ except Exception:
+ print _("Command failed, please check log for more info")
+ raise
if __name__ == '__main__':
main()
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 9fbe228a2..6ef841b85 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -36,9 +36,10 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
+from nova import log as logging
from nova import utils
-from nova import twistd
-from nova.objectstore import handler
+from nova import wsgi
+from nova.objectstore import s3server
FLAGS = flags.FLAGS
@@ -46,7 +47,9 @@ FLAGS = flags.FLAGS
if __name__ == '__main__':
utils.default_flagfile()
- twistd.serve(__file__)
-
-if __name__ == '__builtin__':
- application = handler.get_application() # pylint: disable-msg=C0103
+ FLAGS(sys.argv)
+ logging.setup()
+ router = s3server.S3Application(FLAGS.buckets_path)
+ server = wsgi.Server()
+ server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+ server.wait()
diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy
new file mode 100755
index 000000000..ccb97e3a3
--- /dev/null
+++ b/bin/nova-vncproxy
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""VNC Console Proxy Server."""
+
+import eventlet
+import gettext
+import os
+import sys
+
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import flags
+from nova import log as logging
+from nova import service
+from nova import utils
+from nova import wsgi
+from nova import version
+from nova.vnc import auth
+from nova.vnc import proxy
+
+
+LOG = logging.getLogger('nova.vnc-proxy')
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vncproxy_wwwroot', '/var/lib/nova/noVNC/',
+ 'Full path to noVNC directory')
+flags.DEFINE_boolean('vnc_debug', False,
+ 'Enable debugging features, like token bypassing')
+flags.DEFINE_integer('vncproxy_port', 6080,
+ 'Port that the VNC proxy should bind to')
+flags.DEFINE_string('vncproxy_host', '0.0.0.0',
+ 'Address that the VNC proxy should bind to')
+flags.DEFINE_integer('vnc_token_ttl', 300,
+ 'How many seconds before deleting tokens')
+flags.DEFINE_string('vncproxy_manager', 'nova.vnc.auth.VNCProxyAuthManager',
+ 'Manager for vncproxy auth')
+
+flags.DEFINE_flag(flags.HelpFlag())
+flags.DEFINE_flag(flags.HelpshortFlag())
+flags.DEFINE_flag(flags.HelpXMLFlag())
+
+
+if __name__ == "__main__":
+ utils.default_flagfile()
+ FLAGS(sys.argv)
+ logging.setup()
+
+ LOG.audit(_("Starting nova-vnc-proxy node (version %s)"),
+ version.version_string_with_vcs())
+
+ if not (os.path.exists(FLAGS.vncproxy_wwwroot) and
+ os.path.exists(FLAGS.vncproxy_wwwroot + '/vnc_auto.html')):
+ LOG.info(_("Missing vncproxy_wwwroot (version %s)"),
+ FLAGS.vncproxy_wwwroot)
+ LOG.info(_("You need a slightly modified version of noVNC "
+ "to work with the nova-vnc-proxy"))
+ LOG.info(_("Check out the most recent nova noVNC code: %s"),
+ "git://github.com/sleepsonthefloor/noVNC.git")
+ LOG.info(_("And drop it in %s"), FLAGS.vncproxy_wwwroot)
+ exit(1)
+
+ app = proxy.WebsocketVNCProxy(FLAGS.vncproxy_wwwroot)
+
+ LOG.audit(_("Allowing access to the following files: %s"),
+ app.get_whitelist())
+
+ with_logging = auth.LoggingMiddleware(app)
+
+ if FLAGS.vnc_debug:
+ with_auth = proxy.DebugMiddleware(with_logging)
+ else:
+ with_auth = auth.VNCNovaAuthMiddleware(with_logging)
+
+ service.serve()
+
+ server = wsgi.Server()
+ server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host)
+ server.wait()
diff --git a/bin/stack b/bin/stack
index 25caca06f..d84a82e27 100755
--- a/bin/stack
+++ b/bin/stack
@@ -59,11 +59,21 @@ USAGE = """usage: stack [options] <controller> <method> [arg1=value arg2=value]
def format_help(d):
"""Format help text, keys are labels and values are descriptions."""
+ MAX_INDENT = 30
indent = max([len(k) for k in d])
+ if indent > MAX_INDENT:
+ indent = MAX_INDENT - 6
+
out = []
for k, v in d.iteritems():
- t = textwrap.TextWrapper(initial_indent=' %s ' % k.ljust(indent),
- subsequent_indent=' ' * (indent + 6))
+ if (len(k) + 6) > MAX_INDENT:
+ out.extend([' %s' % k])
+ initial_indent = ' ' * (indent + 6)
+ else:
+ initial_indent = ' %s ' % k.ljust(indent)
+ subsequent_indent = ' ' * (indent + 6)
+ t = textwrap.TextWrapper(initial_indent=initial_indent,
+ subsequent_indent=subsequent_indent)
out.extend(t.wrap(v))
return out
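
The revised format_help caps the label column at MAX_INDENT so that one unusually long method name no longer pushes every description far to the right; oversized labels are printed on their own line instead. A small standalone sketch of the same capping behaviour (the labels and exact spacing below are made up for illustration)::

    import textwrap

    MAX_INDENT = 30

    labels = {
        'flavors': 'list the instance types the controller knows about',
        'a_very_long_administrative_method_name':
            'descriptions for oversized labels start on the next line',
    }

    indent = max([len(k) for k in labels])
    if indent > MAX_INDENT:
        indent = MAX_INDENT - 6

    for k, v in sorted(labels.iteritems()):
        if (len(k) + 6) > MAX_INDENT:
            print '  %s' % k
            initial_indent = ' ' * (indent + 6)
        else:
            initial_indent = '  %s    ' % k.ljust(indent)
        subsequent_indent = ' ' * (indent + 6)
        t = textwrap.TextWrapper(initial_indent=initial_indent,
                                 subsequent_indent=subsequent_indent)
        print '\n'.join(t.wrap(v))
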
diff --git a/contrib/boto_v6/ec2/connection.py b/contrib/boto_v6/ec2/connection.py
index 23466e5d7..868c93c11 100644
--- a/contrib/boto_v6/ec2/connection.py
+++ b/contrib/boto_v6/ec2/connection.py
@@ -4,8 +4,10 @@ Created on 2010/12/20
@author: Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
'''
import boto
+import base64
import boto.ec2
from boto_v6.ec2.instance import ReservationV6
+from boto.ec2.securitygroup import SecurityGroup
class EC2ConnectionV6(boto.ec2.EC2Connection):
@@ -39,3 +41,101 @@ class EC2ConnectionV6(boto.ec2.EC2Connection):
self.build_filter_params(params, filters)
return self.get_list('DescribeInstancesV6', params,
[('item', ReservationV6)])
+
+ def run_instances(self, image_id, min_count=1, max_count=1,
+ key_name=None, security_groups=None,
+ user_data=None, addressing_type=None,
+ instance_type='m1.small', placement=None,
+ kernel_id=None, ramdisk_id=None,
+ monitoring_enabled=False, subnet_id=None,
+ block_device_map=None):
+ """
+ Runs an image on EC2.
+
+ :type image_id: string
+ :param image_id: The ID of the image to run
+
+ :type min_count: int
+ :param min_count: The minimum number of instances to launch
+
+ :type max_count: int
+ :param max_count: The maximum number of instances to launch
+
+ :type key_name: string
+ :param key_name: The name of the key pair with which to
+ launch instances
+
+ :type security_groups: list of strings
+ :param security_groups: The names of the security groups with
+ which to associate instances
+
+ :type user_data: string
+ :param user_data: The user data passed to the launched instances
+
+ :type instance_type: string
+ :param instance_type: The type of instance to run
+ (m1.small, m1.large, m1.xlarge)
+
+ :type placement: string
+ :param placement: The availability zone in which to launch
+ the instances
+
+ :type kernel_id: string
+ :param kernel_id: The ID of the kernel with which to
+ launch the instances
+
+ :type ramdisk_id: string
+ :param ramdisk_id: The ID of the RAM disk with which to
+ launch the instances
+
+ :type monitoring_enabled: bool
+ :param monitoring_enabled: Enable CloudWatch monitoring
+ on the instance.
+
+ :type subnet_id: string
+ :param subnet_id: The subnet ID within which to launch
+ the instances for VPC.
+
+ :type block_device_map:
+ :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+ :param block_device_map: A BlockDeviceMapping data structure
+ describing the EBS volumes associated
+ with the Image.
+
+ :rtype: Reservation
+ :return: The :class:`boto_v6.ec2.instance.ReservationV6`
+ associated with the request for machines
+ """
+ params = {'ImageId': image_id,
+ 'MinCount': min_count,
+ 'MaxCount': max_count}
+ if key_name:
+ params['KeyName'] = key_name
+ if security_groups:
+ l = []
+ for group in security_groups:
+ if isinstance(group, SecurityGroup):
+ l.append(group.name)
+ else:
+ l.append(group)
+ self.build_list_params(params, l, 'SecurityGroup')
+ if user_data:
+ params['UserData'] = base64.b64encode(user_data)
+ if addressing_type:
+ params['AddressingType'] = addressing_type
+ if instance_type:
+ params['InstanceType'] = instance_type
+ if placement:
+ params['Placement.AvailabilityZone'] = placement
+ if kernel_id:
+ params['KernelId'] = kernel_id
+ if ramdisk_id:
+ params['RamdiskId'] = ramdisk_id
+ if monitoring_enabled:
+ params['Monitoring.Enabled'] = 'true'
+ if subnet_id:
+ params['SubnetId'] = subnet_id
+ if block_device_map:
+ block_device_map.build_list_params(params)
+ return self.get_object('RunInstances', params,
+ ReservationV6, verb='POST')
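
The run_instances override added here mirrors boto's stock EC2Connection.run_instances but returns a ReservationV6, so IPv6 address data survives the round trip. A minimal usage sketch; the credentials, endpoint, image ID, keypair, and instance type below are placeholders that depend on your own novarc and cloud::

    from boto.ec2.regioninfo import RegionInfo
    from boto_v6.ec2.connection import EC2ConnectionV6

    # Placeholder credentials and endpoint; take the real values from novarc.
    conn = EC2ConnectionV6(aws_access_key_id='ACCESS-KEY',
                           aws_secret_access_key='SECRET-KEY',
                           is_secure=False,
                           region=RegionInfo(None, 'nova', 'CC_ADDR'),
                           port=8773,
                           path='/services/Cloud')

    # Launch one instance and print what came back, including IPv6 fields
    # carried by ReservationV6.
    reservation = conn.run_instances('ami-tiny', key_name='mykey',
                                     instance_type='m1.tiny',
                                     security_groups=['default'])
    for instance in reservation.instances:
        print instance.id, instance.state
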
diff --git a/contrib/nova.sh b/contrib/nova.sh
index 1187f2728..d7d34dcbd 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -18,6 +18,9 @@ if [ ! -n "$HOST_IP" ]; then
fi
USE_MYSQL=${USE_MYSQL:-0}
+INTERFACE=${INTERFACE:-eth0}
+FLOATING_RANGE=${FLOATING_RANGE:-10.6.0.0/27}
+FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
MYSQL_PASS=${MYSQL_PASS:-nova}
TEST=${TEST:-0}
USE_LDAP=${USE_LDAP:-0}
@@ -72,10 +75,14 @@ if [ "$CMD" == "install" ]; then
sudo modprobe kvm
sudo /etc/init.d/libvirt-bin restart
sudo modprobe nbd
- sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
- sudo apt-get install -y python-migrate python-eventlet python-gflags python-ipy python-tempita
- sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah
- sudo apt-get install -y python-netaddr python-paste python-pastedeploy python-glance
+ sudo apt-get install -y python-twisted python-mox python-ipy python-paste
+ sudo apt-get install -y python-migrate python-gflags python-greenlet
+ sudo apt-get install -y python-libvirt python-libxml2 python-routes
+ sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet
+ sudo apt-get install -y python-novaclient python-glance python-cheetah
+ sudo apt-get install -y python-carrot python-tempita python-sqlalchemy
+ sudo apt-get install -y python-suds
+
if [ "$USE_IPV6" == 1 ]; then
sudo apt-get install -y radvd
@@ -104,7 +111,7 @@ function screen_it {
screen -S nova -p $1 -X stuff "$2$NL"
}
-if [ "$CMD" == "run" ]; then
+if [ "$CMD" == "run" ] || [ "$CMD" == "run_detached" ]; then
cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF
--verbose
@@ -112,6 +119,8 @@ if [ "$CMD" == "run" ]; then
--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
--network_manager=nova.network.manager.$NET_MAN
--my_ip=$HOST_IP
+--public_interface=$INTERFACE
+--vlan_interface=$INTERFACE
--sql_connection=$SQL_CONN
--auth_driver=nova.auth.$AUTH
--libvirt_type=$LIBVIRT_TYPE
@@ -166,14 +175,14 @@ NOVA_CONF_EOF
$NOVA_DIR/bin/nova-manage user admin admin admin admin
# create a project called 'admin' with project manager of 'admin'
$NOVA_DIR/bin/nova-manage project create admin admin
- # export environment variables for project 'admin' and user 'admin'
- $NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
- unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
# create a small network
- $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
+ $NOVA_DIR/bin/nova-manage network create $FIXED_RANGE 1 32
# create some floating ips
- $NOVA_DIR/bin/nova-manage floating create `hostname` 10.6.0.0/27
+ $NOVA_DIR/bin/nova-manage floating create `hostname` $FLOATING_RANGE
+
+ # convert old images
+ $NOVA_DIR/bin/nova-manage image convert $DIR/images
# nova api crashes if we start it with a regular screen command,
# so send the start command by forcing text into the window.
@@ -184,8 +193,15 @@ NOVA_CONF_EOF
screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
screen_it volume "$NOVA_DIR/bin/nova-volume"
screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
- screen_it test ". $NOVA_DIR/novarc"
- screen -S nova -x
+ sleep 2
+ # export environment variables for project 'admin' and user 'admin'
+ $NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
+ unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
+
+ screen_it test "export PATH=$NOVA_DIR/bin:$PATH;. $NOVA_DIR/novarc"
+ if [ "$CMD" != "run_detached" ]; then
+ screen -S nova -x
+ fi
fi
if [ "$CMD" == "run" ] || [ "$CMD" == "terminate" ]; then
diff --git a/doc/.autogenerated b/doc/.autogenerated
new file mode 100644
index 000000000..456c8ad1e
--- /dev/null
+++ b/doc/.autogenerated
@@ -0,0 +1,283 @@
+source/api/nova..adminclient.rst
+source/api/nova..api.direct.rst
+source/api/nova..api.ec2.admin.rst
+source/api/nova..api.ec2.apirequest.rst
+source/api/nova..api.ec2.cloud.rst
+source/api/nova..api.ec2.metadatarequesthandler.rst
+source/api/nova..api.openstack.auth.rst
+source/api/nova..api.openstack.backup_schedules.rst
+source/api/nova..api.openstack.common.rst
+source/api/nova..api.openstack.consoles.rst
+source/api/nova..api.openstack.faults.rst
+source/api/nova..api.openstack.flavors.rst
+source/api/nova..api.openstack.images.rst
+source/api/nova..api.openstack.servers.rst
+source/api/nova..api.openstack.shared_ip_groups.rst
+source/api/nova..api.openstack.zones.rst
+source/api/nova..auth.dbdriver.rst
+source/api/nova..auth.fakeldap.rst
+source/api/nova..auth.ldapdriver.rst
+source/api/nova..auth.manager.rst
+source/api/nova..auth.signer.rst
+source/api/nova..cloudpipe.pipelib.rst
+source/api/nova..compute.api.rst
+source/api/nova..compute.instance_types.rst
+source/api/nova..compute.manager.rst
+source/api/nova..compute.monitor.rst
+source/api/nova..compute.power_state.rst
+source/api/nova..console.api.rst
+source/api/nova..console.fake.rst
+source/api/nova..console.manager.rst
+source/api/nova..console.xvp.rst
+source/api/nova..context.rst
+source/api/nova..crypto.rst
+source/api/nova..db.api.rst
+source/api/nova..db.base.rst
+source/api/nova..db.migration.rst
+source/api/nova..db.sqlalchemy.api.rst
+source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
+source/api/nova..db.sqlalchemy.migration.rst
+source/api/nova..db.sqlalchemy.models.rst
+source/api/nova..db.sqlalchemy.session.rst
+source/api/nova..exception.rst
+source/api/nova..fakememcache.rst
+source/api/nova..fakerabbit.rst
+source/api/nova..flags.rst
+source/api/nova..image.glance.rst
+source/api/nova..image.local.rst
+source/api/nova..image.s3.rst
+source/api/nova..image.service.rst
+source/api/nova..log.rst
+source/api/nova..manager.rst
+source/api/nova..network.api.rst
+source/api/nova..network.linux_net.rst
+source/api/nova..network.manager.rst
+source/api/nova..objectstore.bucket.rst
+source/api/nova..objectstore.handler.rst
+source/api/nova..objectstore.image.rst
+source/api/nova..objectstore.stored.rst
+source/api/nova..quota.rst
+source/api/nova..rpc.rst
+source/api/nova..scheduler.chance.rst
+source/api/nova..scheduler.driver.rst
+source/api/nova..scheduler.manager.rst
+source/api/nova..scheduler.simple.rst
+source/api/nova..scheduler.zone.rst
+source/api/nova..service.rst
+source/api/nova..test.rst
+source/api/nova..tests.api.openstack.fakes.rst
+source/api/nova..tests.api.openstack.test_adminapi.rst
+source/api/nova..tests.api.openstack.test_api.rst
+source/api/nova..tests.api.openstack.test_auth.rst
+source/api/nova..tests.api.openstack.test_common.rst
+source/api/nova..tests.api.openstack.test_faults.rst
+source/api/nova..tests.api.openstack.test_flavors.rst
+source/api/nova..tests.api.openstack.test_images.rst
+source/api/nova..tests.api.openstack.test_ratelimiting.rst
+source/api/nova..tests.api.openstack.test_servers.rst
+source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
+source/api/nova..tests.api.openstack.test_zones.rst
+source/api/nova..tests.api.test_wsgi.rst
+source/api/nova..tests.db.fakes.rst
+source/api/nova..tests.declare_flags.rst
+source/api/nova..tests.fake_flags.rst
+source/api/nova..tests.glance.stubs.rst
+source/api/nova..tests.hyperv_unittest.rst
+source/api/nova..tests.objectstore_unittest.rst
+source/api/nova..tests.real_flags.rst
+source/api/nova..tests.runtime_flags.rst
+source/api/nova..tests.test_access.rst
+source/api/nova..tests.test_api.rst
+source/api/nova..tests.test_auth.rst
+source/api/nova..tests.test_cloud.rst
+source/api/nova..tests.test_compute.rst
+source/api/nova..tests.test_console.rst
+source/api/nova..tests.test_direct.rst
+source/api/nova..tests.test_flags.rst
+source/api/nova..tests.test_instance_types.rst
+source/api/nova..tests.test_localization.rst
+source/api/nova..tests.test_log.rst
+source/api/nova..tests.test_middleware.rst
+source/api/nova..tests.test_misc.rst
+source/api/nova..tests.test_network.rst
+source/api/nova..tests.test_quota.rst
+source/api/nova..tests.test_rpc.rst
+source/api/nova..tests.test_scheduler.rst
+source/api/nova..tests.test_service.rst
+source/api/nova..tests.test_test.rst
+source/api/nova..tests.test_twistd.rst
+source/api/nova..tests.test_utils.rst
+source/api/nova..tests.test_virt.rst
+source/api/nova..tests.test_volume.rst
+source/api/nova..tests.test_xenapi.rst
+source/api/nova..tests.xenapi.stubs.rst
+source/api/nova..twistd.rst
+source/api/nova..utils.rst
+source/api/nova..version.rst
+source/api/nova..virt.connection.rst
+source/api/nova..virt.disk.rst
+source/api/nova..virt.fake.rst
+source/api/nova..virt.hyperv.rst
+source/api/nova..virt.images.rst
+source/api/nova..virt.libvirt_conn.rst
+source/api/nova..virt.xenapi.fake.rst
+source/api/nova..virt.xenapi.network_utils.rst
+source/api/nova..virt.xenapi.vm_utils.rst
+source/api/nova..virt.xenapi.vmops.rst
+source/api/nova..virt.xenapi.volume_utils.rst
+source/api/nova..virt.xenapi.volumeops.rst
+source/api/nova..virt.xenapi_conn.rst
+source/api/nova..volume.api.rst
+source/api/nova..volume.driver.rst
+source/api/nova..volume.manager.rst
+source/api/nova..volume.san.rst
+source/api/nova..wsgi.rst
+source/api/autoindex.rst
+source/api/nova..adminclient.rst
+source/api/nova..api.direct.rst
+source/api/nova..api.ec2.admin.rst
+source/api/nova..api.ec2.apirequest.rst
+source/api/nova..api.ec2.cloud.rst
+source/api/nova..api.ec2.metadatarequesthandler.rst
+source/api/nova..api.openstack.auth.rst
+source/api/nova..api.openstack.backup_schedules.rst
+source/api/nova..api.openstack.common.rst
+source/api/nova..api.openstack.consoles.rst
+source/api/nova..api.openstack.faults.rst
+source/api/nova..api.openstack.flavors.rst
+source/api/nova..api.openstack.images.rst
+source/api/nova..api.openstack.servers.rst
+source/api/nova..api.openstack.shared_ip_groups.rst
+source/api/nova..api.openstack.zones.rst
+source/api/nova..auth.dbdriver.rst
+source/api/nova..auth.fakeldap.rst
+source/api/nova..auth.ldapdriver.rst
+source/api/nova..auth.manager.rst
+source/api/nova..auth.signer.rst
+source/api/nova..cloudpipe.pipelib.rst
+source/api/nova..compute.api.rst
+source/api/nova..compute.instance_types.rst
+source/api/nova..compute.manager.rst
+source/api/nova..compute.monitor.rst
+source/api/nova..compute.power_state.rst
+source/api/nova..console.api.rst
+source/api/nova..console.fake.rst
+source/api/nova..console.manager.rst
+source/api/nova..console.xvp.rst
+source/api/nova..context.rst
+source/api/nova..crypto.rst
+source/api/nova..db.api.rst
+source/api/nova..db.base.rst
+source/api/nova..db.migration.rst
+source/api/nova..db.sqlalchemy.api.rst
+source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
+source/api/nova..db.sqlalchemy.migration.rst
+source/api/nova..db.sqlalchemy.models.rst
+source/api/nova..db.sqlalchemy.session.rst
+source/api/nova..exception.rst
+source/api/nova..fakememcache.rst
+source/api/nova..fakerabbit.rst
+source/api/nova..flags.rst
+source/api/nova..image.glance.rst
+source/api/nova..image.local.rst
+source/api/nova..image.s3.rst
+source/api/nova..image.service.rst
+source/api/nova..log.rst
+source/api/nova..manager.rst
+source/api/nova..network.api.rst
+source/api/nova..network.linux_net.rst
+source/api/nova..network.manager.rst
+source/api/nova..objectstore.bucket.rst
+source/api/nova..objectstore.handler.rst
+source/api/nova..objectstore.image.rst
+source/api/nova..objectstore.stored.rst
+source/api/nova..quota.rst
+source/api/nova..rpc.rst
+source/api/nova..scheduler.chance.rst
+source/api/nova..scheduler.driver.rst
+source/api/nova..scheduler.manager.rst
+source/api/nova..scheduler.simple.rst
+source/api/nova..scheduler.zone.rst
+source/api/nova..service.rst
+source/api/nova..test.rst
+source/api/nova..tests.api.openstack.fakes.rst
+source/api/nova..tests.api.openstack.test_adminapi.rst
+source/api/nova..tests.api.openstack.test_api.rst
+source/api/nova..tests.api.openstack.test_auth.rst
+source/api/nova..tests.api.openstack.test_common.rst
+source/api/nova..tests.api.openstack.test_faults.rst
+source/api/nova..tests.api.openstack.test_flavors.rst
+source/api/nova..tests.api.openstack.test_images.rst
+source/api/nova..tests.api.openstack.test_ratelimiting.rst
+source/api/nova..tests.api.openstack.test_servers.rst
+source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
+source/api/nova..tests.api.openstack.test_zones.rst
+source/api/nova..tests.api.test_wsgi.rst
+source/api/nova..tests.db.fakes.rst
+source/api/nova..tests.declare_flags.rst
+source/api/nova..tests.fake_flags.rst
+source/api/nova..tests.glance.stubs.rst
+source/api/nova..tests.hyperv_unittest.rst
+source/api/nova..tests.objectstore_unittest.rst
+source/api/nova..tests.real_flags.rst
+source/api/nova..tests.runtime_flags.rst
+source/api/nova..tests.test_access.rst
+source/api/nova..tests.test_api.rst
+source/api/nova..tests.test_auth.rst
+source/api/nova..tests.test_cloud.rst
+source/api/nova..tests.test_compute.rst
+source/api/nova..tests.test_console.rst
+source/api/nova..tests.test_direct.rst
+source/api/nova..tests.test_flags.rst
+source/api/nova..tests.test_instance_types.rst
+source/api/nova..tests.test_localization.rst
+source/api/nova..tests.test_log.rst
+source/api/nova..tests.test_middleware.rst
+source/api/nova..tests.test_misc.rst
+source/api/nova..tests.test_network.rst
+source/api/nova..tests.test_quota.rst
+source/api/nova..tests.test_rpc.rst
+source/api/nova..tests.test_scheduler.rst
+source/api/nova..tests.test_service.rst
+source/api/nova..tests.test_test.rst
+source/api/nova..tests.test_twistd.rst
+source/api/nova..tests.test_utils.rst
+source/api/nova..tests.test_virt.rst
+source/api/nova..tests.test_volume.rst
+source/api/nova..tests.test_xenapi.rst
+source/api/nova..tests.xenapi.stubs.rst
+source/api/nova..twistd.rst
+source/api/nova..utils.rst
+source/api/nova..version.rst
+source/api/nova..virt.connection.rst
+source/api/nova..virt.disk.rst
+source/api/nova..virt.fake.rst
+source/api/nova..virt.hyperv.rst
+source/api/nova..virt.images.rst
+source/api/nova..virt.libvirt_conn.rst
+source/api/nova..virt.xenapi.fake.rst
+source/api/nova..virt.xenapi.network_utils.rst
+source/api/nova..virt.xenapi.vm_utils.rst
+source/api/nova..virt.xenapi.vmops.rst
+source/api/nova..virt.xenapi.volume_utils.rst
+source/api/nova..virt.xenapi.volumeops.rst
+source/api/nova..virt.xenapi_conn.rst
+source/api/nova..volume.api.rst
+source/api/nova..volume.driver.rst
+source/api/nova..volume.manager.rst
+source/api/nova..volume.san.rst
+source/api/nova..wsgi.rst
diff --git a/doc/build/html/.buildinfo b/doc/build/html/.buildinfo
new file mode 100644
index 000000000..091736d4f
--- /dev/null
+++ b/doc/build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 2a2fe6198f4be4a4d6f289b09d16d74a
+tags: fbb0d17656682115ca4d033fb2f83ba1
diff --git a/doc/ext/nova_autodoc.py b/doc/ext/nova_autodoc.py
index 5429bb656..3dd992d84 100644
--- a/doc/ext/nova_autodoc.py
+++ b/doc/ext/nova_autodoc.py
@@ -8,5 +8,6 @@ from nova import utils
def setup(app):
rootdir = os.path.abspath(app.srcdir + '/..')
print "**Autodocumenting from %s" % rootdir
- rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir)
+ os.chdir(rootdir)
+ rv = utils.execute('./generate_autodoc_index.sh')
print rv[0]
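
The autodoc hook now changes into the source root before invoking the index generator instead of embedding a shell-style cd in the command string. A standard-library sketch of the same idea, shown with subprocess and its cwd argument rather than nova.utils.execute (the relative rootdir below is illustrative)::

    import os
    import subprocess

    # Run the generator from the source root without 'cd && ...' chaining.
    rootdir = os.path.abspath('..')
    proc = subprocess.Popen(['./generate_autodoc_index.sh'],
                            cwd=rootdir,
                            stdout=subprocess.PIPE)
    output, _ = proc.communicate()
    print output
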
diff --git a/doc/source/_static/tweaks.css b/doc/source/_static/tweaks.css
index 1a18dbac6..7c57c8f35 100644
--- a/doc/source/_static/tweaks.css
+++ b/doc/source/_static/tweaks.css
@@ -69,3 +69,150 @@ table.docutils {
.tweet_list li .tweet_avatar {
float: left;
}
+
+/* ------------------------------------------
+PURE CSS SPEECH BUBBLES
+by Nicolas Gallagher
+- http://nicolasgallagher.com/pure-css-speech-bubbles/
+
+http://nicolasgallagher.com
+http://twitter.com/necolas
+
+Created: 02 March 2010
+Version: 1.1 (21 October 2010)
+
+Dual licensed under MIT and GNU GPLv2 © Nicolas Gallagher
+------------------------------------------ */
+/* THE SPEECH BUBBLE
+------------------------------------------------------------------------------------------------------------------------------- */
+
+/* THE SPEECH BUBBLE
+------------------------------------------------------------------------------------------------------------------------------- */
+
+.triangle-border {
+ position:relative;
+ padding:15px;
+ margin:1em 0 3em;
+ border:5px solid #BC1518;
+ color:#333;
+ background:#fff;
+
+ /* css3 */
+ -moz-border-radius:10px;
+ -webkit-border-radius:10px;
+ border-radius:10px;
+}
+
+/* Variant : for left positioned triangle
+------------------------------------------ */
+
+.triangle-border.left {
+ margin-left:30px;
+}
+
+/* Variant : for right positioned triangle
+------------------------------------------ */
+
+.triangle-border.right {
+ margin-right:30px;
+}
+
+/* THE TRIANGLE
+------------------------------------------------------------------------------------------------------------------------------- */
+
+.triangle-border:before {
+ content:"";
+ display:block; /* reduce the damage in FF3.0 */
+ position:absolute;
+ bottom:-40px; /* value = - border-top-width - border-bottom-width */
+ left:40px; /* controls horizontal position */
+ width:0;
+ height:0;
+ border:20px solid transparent;
+ border-top-color:#BC1518;
+}
+
+/* creates the smaller triangle */
+.triangle-border:after {
+ content:"";
+ display:block; /* reduce the damage in FF3.0 */
+ position:absolute;
+ bottom:-26px; /* value = - border-top-width - border-bottom-width */
+ left:47px; /* value = (:before left) + (:before border-left) - (:after border-left) */
+ width:0;
+ height:0;
+ border:13px solid transparent;
+ border-top-color:#fff;
+}
+
+/* Variant : top
+------------------------------------------ */
+
+/* creates the larger triangle */
+.triangle-border.top:before {
+ top:-40px; /* value = - border-top-width - border-bottom-width */
+ right:40px; /* controls horizontal position */
+ bottom:auto;
+ left:auto;
+ border:20px solid transparent;
+ border-bottom-color:#BC1518;
+}
+
+/* creates the smaller triangle */
+.triangle-border.top:after {
+ top:-26px; /* value = - border-top-width - border-bottom-width */
+ right:47px; /* value = (:before right) + (:before border-right) - (:after border-right) */
+ bottom:auto;
+ left:auto;
+ border:13px solid transparent;
+ border-bottom-color:#fff;
+}
+
+/* Variant : left
+------------------------------------------ */
+
+/* creates the larger triangle */
+.triangle-border.left:before {
+ top:10px; /* controls vertical position */
+ left:-30px; /* value = - border-left-width - border-right-width */
+ bottom:auto;
+ border-width:15px 30px 15px 0;
+ border-style:solid;
+ border-color:transparent #BC1518;
+}
+
+/* creates the smaller triangle */
+.triangle-border.left:after {
+ top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */
+ left:-21px; /* value = - border-left-width - border-right-width */
+ bottom:auto;
+ border-width:9px 21px 9px 0;
+ border-style:solid;
+ border-color:transparent #fff;
+}
+
+/* Variant : right
+------------------------------------------ */
+
+/* creates the larger triangle */
+.triangle-border.right:before {
+ top:10px; /* controls vertical position */
+ right:-30px; /* value = - border-left-width - border-right-width */
+ bottom:auto;
+ left:auto;
+ border-width:15px 0 15px 30px;
+ border-style:solid;
+ border-color:transparent #BC1518;
+}
+
+/* creates the smaller triangle */
+.triangle-border.right:after {
+ top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */
+ right:-21px; /* value = - border-left-width - border-right-width */
+ bottom:auto;
+ left:auto;
+ border-width:9px 0 9px 21px;
+ border-style:solid;
+ border-color:transparent #fff;
+}
+
diff --git a/doc/source/_theme/layout.html b/doc/source/_theme/layout.html
index e3eb54b71..0a37a7943 100644
--- a/doc/source/_theme/layout.html
+++ b/doc/source/_theme/layout.html
@@ -71,12 +71,21 @@
</p>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
+
+ <p class="triangle-border right">
+ Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read <a href="http://nova.openstack.org/2011.1">Nova 2011.1 docs</a> or <a href="http://docs.openstack.org">all OpenStack docs</a> too.
+ </p>
+
{%- endif %}
{%- if pagename == "index" %}
- <h3>{{ _('Twitter Feed') }}</h3>
+
+
+ <h3>{{ _('Twitter Feed') }}</h3>
<div id="twitter_feed" class='twitter_feed'></div>
{%- endif %}
+
+
{%- endblock %}
diff --git a/doc/source/adminguide/distros/others.rst b/doc/source/adminguide/distros/others.rst
deleted file mode 100644
index ec14a9abb..000000000
--- a/doc/source/adminguide/distros/others.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-Installation on other distros (like Debian, Fedora, or CentOS)
-==============================================================
-
-Feel free to add additional notes for additional distributions.
-
-Nova installation on CentOS 5.5
--------------------------------
-
-These are notes for installing OpenStack Compute on CentOS 5.5 and will be updated but are NOT final. Please test for accuracy and edit as you see fit.
-
-The principal bottleneck for running nova on CentOS is Python 2.6. Nova is written in Python 2.6, and CentOS 5.5 comes with Python 2.4. We cannot update Python system wide, as some core utilities (like yum) depend on Python 2.4. Also, very few Python 2.6 modules are available in the CentOS/EPEL repos.
-
-Pre-reqs
---------
-
-Add euca2ools and EPEL repo first.::
-
- cat >/etc/yum.repos.d/euca2ools.repo << EUCA_REPO_CONF_EOF
- [eucalyptus]
- name=euca2ools
- baseurl=http://www.eucalyptussoftware.com/downloads/repo/euca2ools/1.3.1/yum/centos/
- enabled=1
- gpgcheck=0
-
- EUCA_REPO_CONF_EOF
-
-::
-
- rpm -Uvh 'http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm'
-
-Now install python2.6, kvm and few other libraries through yum::
-
- yum -y install dnsmasq vblade kpartx kvm gawk iptables ebtables bzr screen euca2ools curl rabbitmq-server gcc gcc-c++ autoconf automake swig openldap openldap-servers nginx python26 python26-devel python26-distribute git openssl-devel python26-tools mysql-server qemu kmod-kvm libxml2 libxslt libxslt-devel mysql-devel
-
-Then download the latest aoetools and build (and install) it. Check for the latest version on SourceForge; the exact URL will change if there's a new release::
-
- wget -c http://sourceforge.net/projects/aoetools/files/aoetools/32/aoetools-32.tar.gz/download
- tar -zxvf aoetools-32.tar.gz
- cd aoetools-32
- make
- make install
-
-Add the udev rules for aoetools::
-
- cat > /etc/udev/rules.d/60-aoe.rules << AOE_RULES_EOF
- SUBSYSTEM=="aoe", KERNEL=="discover", NAME="etherd/%k", GROUP="disk", MODE="0220"
- SUBSYSTEM=="aoe", KERNEL=="err", NAME="etherd/%k", GROUP="disk", MODE="0440"
- SUBSYSTEM=="aoe", KERNEL=="interfaces", NAME="etherd/%k", GROUP="disk", MODE="0220"
- SUBSYSTEM=="aoe", KERNEL=="revalidate", NAME="etherd/%k", GROUP="disk", MODE="0220"
- # aoe block devices
- KERNEL=="etherd*", NAME="%k", GROUP="disk"
- AOE_RULES_EOF
-
-Load the kernel modules::
-
- modprobe aoe
-
-::
-
- modprobe kvm
-
-Now, install the Python modules using easy_install-2.6; this ensures the installation is done against Python 2.6::
-
-    easy_install-2.6 twisted sqlalchemy mox greenlet carrot daemon eventlet tornado IPy routes lxml MySQL-python
-
-python-gflags needs to be downloaded and installed manually; use these commands (check the exact URL for newer releases):
-
-::
-
- wget -c "http://python-gflags.googlecode.com/files/python-gflags-1.4.tar.gz"
- tar -zxvf python-gflags-1.4.tar.gz
- cd python-gflags-1.4
- python2.6 setup.py install
- cd ..
-
-Do the same for the python2.6 libxml2 module; notice the --with-python and --prefix flags. --with-python ensures we are building it against python2.6 (otherwise it will build against python2.4, which is the default)::
-
- wget -c "ftp://xmlsoft.org/libxml2/libxml2-2.7.3.tar.gz"
- tar -zxvf libxml2-2.7.3.tar.gz
- cd libxml2-2.7.3
- ./configure --with-python=/usr/bin/python26 --prefix=/usr
- make all
- make install
- cd python
- python2.6 setup.py install
- cd ..
-
-Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst
deleted file mode 100644
index bd0693c46..000000000
--- a/doc/source/adminguide/distros/ubuntu.10.04.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-Installing on Ubuntu 10.04 (Lucid)
-==================================
-
-Step 1: Get the latest code
-----------------------------
-Grab the latest code from launchpad:
-
-::
-
- bzr clone lp:nova
-
-Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (when using Debian, edit nova.sh to have USE_PPA=0):
-
-.. todo:: give a link to a stable releases page
-
-Step 2: Install dependencies
-----------------------------
-
-Nova requires rabbitmq for messaging, so install that first.
-
-*Note:* You must have sudo installed to run these commands as shown here.
-
-::
-
- sudo apt-get install rabbitmq-server
-
-
-You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
-
-If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gflags, which are included in the OpenStack PPA.
-
-::
-
- sudo apt-get install python-software-properties
- sudo add-apt-repository ppa:nova-core/trunk
- sudo apt-get update
- sudo apt-get install python-twisted python-gflags
-
-
-Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/distros/ubuntu.10.10.rst b/doc/source/adminguide/distros/ubuntu.10.10.rst
deleted file mode 100644
index a3fa2def1..000000000
--- a/doc/source/adminguide/distros/ubuntu.10.10.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-Installing on Ubuntu 10.10 (Maverick)
-=====================================
-Single Machine Installation (Ubuntu 10.10)
-
-While we wouldn't expect you to put OpenStack Compute into production on a non-LTS version of Ubuntu, these instructions are up-to-date with the latest version of Ubuntu.
-
-Make sure you are running Ubuntu 10.10 so that the packages will be available. This install requires more than 70 MB of free disk space.
-
-These instructions are based on Soren Hansen's blog entry, Openstack on Maverick. A script is in progress as well.
-
-Step 1: Install required prerequisites
---------------------------------------
-Nova requires rabbitmq for messaging and redis for storing state (for now), so we'll install these first.::
-
- sudo apt-get install rabbitmq-server redis-server
-
-You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
-
-Step 2: Install Nova packages available in Maverick Meerkat
------------------------------------------------------------
-Type or copy/paste in the following line to get the packages that you use to run OpenStack Compute.::
-
- sudo apt-get install python-nova
- sudo apt-get install nova-api nova-objectstore nova-compute nova-scheduler nova-network euca2ools unzip
-
-You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. This operation may take a while as many dependent packages will be installed. Note: there is a dependency problem with python-nova which can be worked around by installing python-nova first.
-
-When the installation is complete, you'll see the following lines confirming:::
-
- Adding system user `nova' (UID 106) ...
- Adding new user `nova' (UID 106) with group `nogroup' ...
- Not creating home directory `/var/lib/nova'.
- Setting up nova-scheduler (0.9.1~bzr331-0ubuntu2) ...
- * Starting nova scheduler nova-scheduler
- WARNING:root:Starting scheduler node
- ...done.
- Processing triggers for libc-bin ...
- ldconfig deferred processing now taking place
- Processing triggers for python-support ...
-
-Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/flags.rst b/doc/source/adminguide/flags.rst
deleted file mode 100644
index 072f0a1a5..000000000
--- a/doc/source/adminguide/flags.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Flags and Flagfiles
-===================
-
-* python-gflags
-* flagfiles
-* list of flags by component (see concepts list)
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
deleted file mode 100644
index c53455e3e..000000000
--- a/doc/source/adminguide/multi.node.install.rst
+++ /dev/null
@@ -1,392 +0,0 @@
-
-Installing Nova on Multiple Servers
-===================================
-
-When you move beyond evaluating the technology and into building an actual
-production environment, you will need to know how to configure your datacenter
-and how to deploy components across your clusters. This guide should help you
-through that process.
-
-You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.
-
-This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of it works around issues that still need to be resolved either in packaging or through bug fixes. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
-
-For a starting architecture, these instructions describe installing a cloud controller node and a compute node. The cloud controller node contains the nova services plus the database. The compute node installs all the nova services but then refers to the database installation, which is hosted by the cloud controller node.
-
-Requirements for a multi-node installation
-------------------------------------------
-
-* You need a real database, compatible with SQLAlchemy (MySQL, PostgreSQL). There's not a specific reason to choose one over the other; it basically depends on what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though.
-* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
-* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
-
-Assumptions
------------
-
-* Networking is configured between/through the physical machines on a single subnet.
-* Installation and execution are both performed by ROOT user.
-
-
-Scripted Installation
----------------------
-A script is available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node.
-
-You must run these scripts with root permissions.
-
-From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/.
-
-::
-
- wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/nova-CC-install-v1.1.sh
-
-Ensure you can execute the script by modifying the permissions on the script file.
-
-::
-
- sudo chmod 755 nova-CC-install-v1.1.sh
-
-
-::
-
- sudo ./nova-CC-install-v1.1.sh
-
-Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. You can use the nova-NODE-installer.sh script from the above github-hosted project for the compute node installation.
-
-Copy the nova.conf from the cloud controller node to the compute node.
-
-Restart related services::
-
- libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
-
-You can go to the `Configuration section`_ for next steps.
-
-Manual Installation - Step-by-Step
-----------------------------------
-The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only.
-
-Cloud Controller Installation
-`````````````````````````````
-On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_.
-
-Step 1 - Use apt-get to get the latest code
--------------------------------------------
-
-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk. The ‘python-software-properties’ package is a pre-requisite for setting up the nova package repo:
-
-::
-
- sudo apt-get install python-software-properties
- sudo add-apt-repository ppa:nova-core/trunk
-
-2. Run update.
-
-::
-
- sudo apt-get update
-
-3. Install python required packages, nova-packages, and helper apps.
-
-::
-
- sudo apt-get install python-greenlet python-mysqldb python-nova nova-common nova-doc nova-api nova-network nova-objectstore nova-scheduler nova-compute euca2ools unzip
-
-It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
-
-Step 2 Set up configuration file (installed in /etc/nova)
----------------------------------------------------------
-
-1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
-
-::
-
---daemonize=1
---dhcpbridge_flagfile=/etc/nova/nova.conf
---dhcpbridge=/usr/bin/nova-dhcpbridge
---logdir=/var/log/nova
---state_path=/var/lib/nova
-
-The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:
-
---sql_connection ### Location of Nova SQL DB
-
---s3_host ### This is where Nova is hosting the objectstore service, which will contain the VM images and buckets
-
---rabbit_host ### This is where the rabbit AMQP messaging service is hosted
-
---cc_host ### This is where the the nova-api service lives
-
---verbose ### Optional but very helpful during initial setup
-
---ec2_url ### The location to interface nova-api
-
---network_manager ### Many options here, discussed below. This is how your controller will communicate with additional Nova nodes and VMs:
-
-nova.network.manager.FlatManager # Simple, no-vlan networking type
-nova.network.manager.FlatDHCPManager # Flat networking with DHCP
-nova.network.manager.VlanManager # Vlan networking with DHCP – /DEFAULT/ if no network manager is defined in nova.conf
-
---fixed_range=<network/prefix> ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12
-
---network_size=<# of addrs> ### This is the total number of IP Addrs to use for VM guests, of all projects. E.g. 5000
-
-The following code can be cut and pasted, and edited for your setup:
-
-Note: CC_ADDR=<the external IP address of your cloud controller>
-
-Detailed explanation of the following example is available above.
-
-::
-
---sql_connection=mysql://root:nova@<CC_ADDR>/nova
---s3_host=<CC_ADDR>
---rabbit_host=<CC_ADDR>
---cc_host=<CC_ADDR>
---verbose
---ec2_url=http://<CC_ADDR>:8773/services/Cloud
---network_manager=nova.network.manager.VlanManager
---fixed_range=<network/prefix>
---network_size=<# of addrs>
-
-2. Create a “nova” group, and set permissions::
-
- addgroup nova
-
-The Nova config file should have its owner set to root:nova, and mode set to 0644, since it contains your MySQL server's root password. ::
-
- chown -R root:nova /etc/nova
- chmod 644 /etc/nova/nova.conf
-
-Step 3 - Setup the SQL DB (MySQL for this setup)
-------------------------------------------------
-
-1. First you 'preseed' to bypass all the installation prompts::
-
- bash
- MYSQL_PASS=nova
- cat <<MYSQL_PRESEED | debconf-set-selections
- mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
- mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
- mysql-server-5.1 mysql-server/start_on_boot boolean true
- MYSQL_PRESEED
-
-2. Install MySQL::
-
- apt-get install -y mysql-server
-
-3. Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost to any::
-
- sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
- service mysql restart
-
-4. MySQL DB configuration:
-
-Create NOVA database::
-
- mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
-
-Update the DB to include user 'root'@'%' with super user privileges::
-
- mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
-
-Set the MySQL root password::
-
- mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
-
-Compute Node Installation
-`````````````````````````
-
-Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node.
-
-Network Configuration
----------------------
-
-If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically.
-
-Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
-
- < begin /etc/network/interfaces >
- # The loopback network interface
- auto lo
- iface lo inet loopback
-
- # Networking for NOVA
- auto br100
-
- iface br100 inet dhcp
- bridge_ports eth0
- bridge_stp off
- bridge_maxwait 0
- bridge_fd 0
- < end /etc/network/interfaces >
-
-Next, restart networking to apply the changes::
-
- sudo /etc/init.d/networking restart
-
-Configuration
-`````````````
-
-On the Compute node, you should continue with these configuration steps.
-
-Step 1 - Set up the Nova environment
-------------------------------------
-
-These are the commands you run to update the database if needed, and then set up a user and project::
-
- /usr/bin/python /usr/bin/nova-manage db sync
- /usr/bin/python /usr/bin/nova-manage user admin <user_name>
- /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
- /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>
-
-Here is an example of what this looks like with real data::
-
- /usr/bin/python /usr/bin/nova-manage db sync
- /usr/bin/python /usr/bin/nova-manage user admin dub
- /usr/bin/python /usr/bin/nova-manage project create dubproject dub
- /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
-
-(I chose a /24 since that falls inside the /12 range I set in 'fixed-range' in nova.conf. Currently, there can only be one network, and I am using the maximum number of IPs available in a /24. You can choose to use any valid amount that you would like.)
-
-Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case, you will need to manually edit the sql db 'networks' table.
-
-On running the "nova-manage network create" command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. You only need to mark the network as a bridge if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
-
-
-Step 2 - Create Nova certifications
------------------------------------
-
-1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and perform all the other assorted API functions.
-
-::
-
- mkdir -p /root/creds
- /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip
-
-2. Unzip them in your home directory, and add them to your environment.
-
-::
-
- unzip /root/creds/novacreds.zip -d /root/creds/
- cat /root/creds/novarc >> ~/.bashrc
- source ~/.bashrc
-
-Step 3 - Restart all relevant services
---------------------------------------
-
-Restart all six services in total, just to cover the entire spectrum::
-
- libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
-
-Step 4 - Closing steps, and cleaning up
----------------------------------------
-
-One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
-
- euca-authorize -P icmp -t -1:-1 default
- euca-authorize -P tcp -p 22 default
-
-Another common issue is you cannot ping or SSH to your instances after issuing the 'euca-authorize' commands. Something to look at is the number of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following::
-
- killall dnsmasq
- service nova-network restart
-
-To avoid issues with KVM and permissions with Nova, run the following commands to ensure we have VMs that are running optimally::
-
- chgrp kvm /dev/kvm
- chmod g+rwx /dev/kvm
-
-If you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step::
-
- # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773
-
-Testing the Installation
-````````````````````````
-
-You can confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query::
-
- mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'
-
-In return, you should see something similar to this::
- +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
- | created_at | updated_at | deleted_at | deleted | id | host | binary | topic | report_count | disabled | availability_zone |
- +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
- | 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL | 0 | 1 | osdemo02 | nova-network | network | 46064 | 0 | nova |
- | 2011-01-28 22:52:48 | 2011-02-03 06:55:57 | NULL | 0 | 2 | osdemo02 | nova-compute | compute | 46056 | 0 | nova |
- | 2011-01-28 22:52:52 | 2011-02-03 06:55:50 | NULL | 0 | 3 | osdemo02 | nova-scheduler | scheduler | 46065 | 0 | nova |
- | 2011-01-29 23:49:29 | 2011-02-03 06:54:26 | NULL | 0 | 4 | osdemo01 | nova-compute | compute | 37050 | 0 | nova |
- | 2011-01-30 23:42:24 | 2011-02-03 06:55:44 | NULL | 0 | 9 | osdemo04 | nova-compute | compute | 28484 | 0 | nova |
- | 2011-01-30 21:27:28 | 2011-02-03 06:54:23 | NULL | 0 | 8 | osdemo05 | nova-compute | compute | 29284 | 0 | nova |
- +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
-You can see that osdemo0{1,2,4,5} are all running 'nova-compute'. When you start spinning up instances, they will allocate on any node that is running nova-compute from this list.
-
-You can then use `euca2ools` to test some items::
-
- euca-describe-images
- euca-describe-instances
-
-If you have issues with the API key, you may need to re-source your creds file::
-
- . /root/creds/novarc
-
-If you don’t get any immediate errors, you’re successfully making calls to your cloud!
-
-Spinning up a VM for Testing
-````````````````````````````
-
-(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
-
-The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
-
-UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can’t use images without ramdisks yet, so we can’t use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we’ll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_.
-
-Download the image, and publish to your bucket:
-
-::
-
- image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
- wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
- uec-publish-tarball $image mybucket
-
-This will output three references: an "emi", an "eri", and an "eki" (image, ramdisk, and kernel). The emi is the one we use to launch instances, so take note of this.
-
-Create a keypair to SSH to the server:
-
-::
-
- euca-add-keypair mykey > mykey.priv
-
- chmod 0600 mykey.priv
-
-Boot your instance:
-
-::
-
- euca-run-instances $emi -k mykey -t m1.tiny
-
-($emi is replaced with the output from the previous command)
-
-Checking status, and confirming communication:
-
-Once you have booted the instance, you can check the status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
-
-::
-
- euca-describe-instances
-
-Once in a "running" state, you can use your SSH key connect:
-
-::
-
- ssh -i mykey.priv root@$ipaddress
-
-When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command:
-
-::
-
- euca-terminate-instances $instance-id
-
-You can determine the instance-id with `euca-describe-instances`, and the format is "i-" with a series of letters and numbers following, e.g. i-a4g9d.
-
-For more information on creating your own custom (production-ready) instance images, please visit http://wiki.openstack.org/GettingImages!
-
-Enjoy your new private cloud, and play responsibly!
diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst
deleted file mode 100644
index ff43aa90b..000000000
--- a/doc/source/adminguide/single.node.install.rst
+++ /dev/null
@@ -1,362 +0,0 @@
-Installing Nova on a Single Host
-================================
-
-Nova can be run on a single machine, and it is recommended that new users practice managing this type of installation before graduating to multi-node systems.
-
-The fastest way to get a test cloud running is through our :doc:`../quickstart`. But for more detail on installing the system read this doc.
-
-
-Steps 1 and 2: Get the latest Nova code and system software
-------------------------------------------------------------
-
-Depending on your system, the method for accomplishing this varies.
-
-.. toctree::
- :maxdepth: 1
-
- distros/ubuntu.10.04
- distros/ubuntu.10.10
- distros/others
-
-
-Step 3: Build and install Nova services
----------------------------------------
-
-Switch to the base nova source directory.
-
-Then type or copy/paste in the following lines to build and install the Python code for OpenStack Compute.
-
-::
-
- sudo python setup.py build
- sudo python setup.py install
-
-
-When the installation is complete, you'll see the following lines:
-
-::
-
- Installing nova-network script to /usr/local/bin
- Installing nova-volume script to /usr/local/bin
- Installing nova-objectstore script to /usr/local/bin
- Installing nova-manage script to /usr/local/bin
- Installing nova-scheduler script to /usr/local/bin
- Installing nova-dhcpbridge script to /usr/local/bin
- Installing nova-compute script to /usr/local/bin
- Installing nova-instancemonitor script to /usr/local/bin
- Installing nova-api script to /usr/local/bin
- Installing nova-import-canonical-imagestore script to /usr/local/bin
-
- Installed /usr/local/lib/python2.6/dist-packages/nova-2010.1-py2.6.egg
- Processing dependencies for nova==2010.1
- Finished processing dependencies for nova==2010.1
-
-
-Step 4: Create the Nova Database
---------------------------------
-Type or copy/paste in the following line to create your nova db::
-
- sudo nova-manage db sync
-
-Step 5: Create a Nova administrator
------------------------------------
-Type or copy/paste in the following line to create a user named "anne."::
-
- sudo nova-manage user admin anne
-
-You see an access key and a secret key export, such as these made-up ones:::
-
- export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd
- export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7
-
-Step 6: Create the network
---------------------------
-
-Type or copy/paste in the following line to create a network prior to creating a project.
-
-::
-
- sudo nova-manage network create 10.0.0.0/8 1 64
-
-For this command, the first argument is your network in CIDR notation, such as 192.168.1.0/24. The value 1 is the total number of networks you want made, and the 64 value is the total number of IPs in all networks.
-
-After running this command, entries are made in the 'networks' and 'fixed_ips' table in the database.
-
-Step 7: Create a project with the user you created
---------------------------------------------------
-Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne.
-
-::
-
- sudo nova-manage project create IRT anne
-
-::
-
- Generating RSA private key, 1024 bit long modulus
- .....++++++
- ..++++++
- e is 65537 (0x10001)
- Using configuration from ./openssl.cnf
- Check that the request matches the signature
- Signature ok
- The Subject's Distinguished Name is as follows
- countryName :PRINTABLE:'US'
- stateOrProvinceName :PRINTABLE:'California'
- localityName :PRINTABLE:'MountainView'
- organizationName :PRINTABLE:'AnsoLabs'
- organizationalUnitName:PRINTABLE:'NovaDev'
- commonName :PRINTABLE:'anne-2010-10-12T21:12:35Z'
- Certificate is to be certified until Oct 12 21:12:35 2011 GMT (365 days)
-
- Write out database with 1 new entries
- Data Base Updated
-
-
-Step 8: Unzip the nova.zip
---------------------------
-
-You should have a nova.zip file in your current working directory. Unzip it with this command:
-
-::
-
- unzip nova.zip
-
-
-You'll see these files extract.
-
-::
-
- Archive: nova.zip
- extracting: novarc
- extracting: pk.pem
- extracting: cert.pem
- extracting: nova-vpn.conf
- extracting: cacert.pem
-
-
-Step 9: Source the rc file
---------------------------
-Type or copy/paste the following to source the novarc file in your current working directory.
-
-::
-
- . novarc
-
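-To verify that the credentials are now in your environment, a quick optional check (not part of the original steps) is to list the exported variables::
-
- env | grep EC2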
-
-Step 10: Pat yourself on the back :)
------------------------------------
-Congratulations! Your cloud is up and running: you've created an admin user, created a network, retrieved the user's credentials, and put them in your environment.
-
-Now you need an image.
-
-
-Step 11: Get an image
---------------------
-To make things easier, we've provided a small image on the Rackspace CDN. Use this command to get it on your server.
-
-::
-
- wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
-
-
-You should see download output like this::
-
- --2010-10-12 21:40:55-- http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
- Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7
- Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected.
- HTTP request sent, awaiting response... 200 OK
- Length: 58520278 (56M) [application/x-gzip]
- Saving to: `images.tgz'
-
- 100%[======================================>] 58,520,278 14.1M/s in 3.9s
-
- 2010-10-12 21:40:59 (14.1 MB/s) - `images.tgz' saved [58520278/58520278]
-
-
-
-Step 12: Decompress the image file
-----------------------------------
-Use this command to extract the image files::
-
- tar xvzf images.tgz
-
-You get a directory listing like so::
-
- images
- |-- aki-lucid
- | |-- image
- | `-- info.json
- |-- ami-tiny
- | |-- image
- | `-- info.json
- `-- ari-lucid
- |-- image
- `-- info.json
-
-Step 13: Send commands to upload sample image to the cloud
-----------------------------------------------------------
-
-Type or copy/paste the following command to create a manifest for the kernel::
-
- euca-bundle-image -i images/aki-lucid/image -p kernel --kernel true
-
-You should see this in response::
-
- Checking image
- Tarring image
- Encrypting image
- Splitting image...
- Part: kernel.part.0
- Generating manifest /tmp/kernel.manifest.xml
-
-Type or copy/paste the following command to create a manifest for the ramdisk::
-
- euca-bundle-image -i images/ari-lucid/image -p ramdisk --ramdisk true
-
-You should see this in response::
-
- Checking image
- Tarring image
- Encrypting image
- Splitting image...
- Part: ramdisk.part.0
- Generating manifest /tmp/ramdisk.manifest.xml
-
-Type or copy/paste the following command to upload the kernel bundle::
-
- euca-upload-bundle -m /tmp/kernel.manifest.xml -b mybucket
-
-You should see this in response::
-
- Checking bucket: mybucket
- Creating bucket: mybucket
- Uploading manifest file
- Uploading part: kernel.part.0
- Uploaded image as mybucket/kernel.manifest.xml
-
-Type or copy/paste the following command to upload the ramdisk bundle::
-
- euca-upload-bundle -m /tmp/ramdisk.manifest.xml -b mybucket
-
-You should see this in response::
-
- Checking bucket: mybucket
- Uploading manifest file
- Uploading part: ramdisk.part.0
- Uploaded image as mybucket/ramdisk.manifest.xml
-
-Type or copy/paste the following command to register the kernel and get its ID::
-
- euca-register mybucket/kernel.manifest.xml
-
-You should see this in response::
-
- IMAGE ami-fcbj2non
-
-Type or copy/paste the following command to register the ramdisk and get its ID::
-
- euca-register mybucket/ramdisk.manifest.xml
-
-You should see this in response::
-
- IMAGE ami-orukptrc
-
-Type or copy/paste the following command to create a manifest for the machine image, associating it with the ramdisk and kernel IDs that you got from the previous commands::
-
- euca-bundle-image -i images/ami-tiny/image -p machine --kernel ami-fcbj2non --ramdisk ami-orukptrc
-
-You should see this in response::
-
- Checking image
- Tarring image
- Encrypting image
- Splitting image...
- Part: machine.part.0
- Part: machine.part.1
- Part: machine.part.2
- Part: machine.part.3
- Part: machine.part.4
- Generating manifest /tmp/machine.manifest.xml
-
-Type or copy/paste the following command to upload the machine image bundle::
-
- euca-upload-bundle -m /tmp/machine.manifest.xml -b mybucket
-
-You should see this in response::
-
- Checking bucket: mybucket
- Uploading manifest file
- Uploading part: machine.part.0
- Uploading part: machine.part.1
- Uploading part: machine.part.2
- Uploading part: machine.part.3
- Uploading part: machine.part.4
- Uploaded image as mybucket/machine.manifest.xml
-
-Type or copy/paste the following command to register the machine image and get its ID::
-
- euca-register mybucket/machine.manifest.xml
-
-You should see this in response::
-
- IMAGE ami-g06qbntt
-
-Type or copy/paste the following commands to register an SSH keypair for use in starting and accessing the instances::
-
- euca-add-keypair mykey > mykey.priv
- chmod 600 mykey.priv
-
-Type or copy/paste the following command to run an instance using the keypair and IDs that we previously created::
-
- euca-run-instances ami-g06qbntt --kernel ami-fcbj2non --ramdisk ami-orukptrc -k mykey
-
-You should see this in response::
-
- RESERVATION r-0at28z12 IRT
- INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 scheduling mykey (IRT, None) m1.small 2010-10-18 19:02:10.443599
-
-Type or copy/paste the following command to watch as the scheduler launches and boots your instance::
-
- euca-describe-instances
-
-You should see this in response::
-
- RESERVATION r-0at28z12 IRT
- INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 launching mykey (IRT, cloud02) m1.small 2010-10-18 19:02:10.443599
-
-Type or copy/paste the following command to see when loading is complete and the instance is running::
-
- euca-describe-instances
-
-You should see this in response::
-
- RESERVATION r-0at28z12 IRT
- INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 running mykey (IRT, cloud02) 0 m1.small 2010-10-18 19:02:10.443599
-
-Type or copy/paste the following command to check that the virtual machine is running::
-
- virsh list
-
-You should see this in response::
-
- Id Name State
- ----------------------------------
- 1 2842445831 running
-
-Type or copy/paste the following command to ssh to the instance using your private key::
-
- ssh -i mykey.priv root@10.0.0.3
-
-
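-If the connection is refused, the instance may still be booting; one optional way to watch its progress (not part of the original walkthrough) is to request the console output using the instance ID from the earlier steps::
-
- euca-get-console-output i-1b0bh8n
-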
-Troubleshooting Installation
-----------------------------
-
-If you see an "error loading the config file './openssl.cnf'" message, it means the openssl.cnf file is not in the location where Nova expects it. Copy it there and reboot, then try the command again.
-
-::
-
- cp /etc/ssl/openssl.cnf ~
- sudo reboot
-
-
-
diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst
new file mode 100644
index 000000000..329a465db
--- /dev/null
+++ b/doc/source/api/autoindex.rst
@@ -0,0 +1,144 @@
+.. toctree::
+ :maxdepth: 1
+
+ nova..adminclient.rst
+ nova..api.direct.rst
+ nova..api.ec2.admin.rst
+ nova..api.ec2.apirequest.rst
+ nova..api.ec2.cloud.rst
+ nova..api.ec2.metadatarequesthandler.rst
+ nova..api.openstack.auth.rst
+ nova..api.openstack.backup_schedules.rst
+ nova..api.openstack.common.rst
+ nova..api.openstack.consoles.rst
+ nova..api.openstack.faults.rst
+ nova..api.openstack.flavors.rst
+ nova..api.openstack.images.rst
+ nova..api.openstack.servers.rst
+ nova..api.openstack.shared_ip_groups.rst
+ nova..api.openstack.zones.rst
+ nova..auth.dbdriver.rst
+ nova..auth.fakeldap.rst
+ nova..auth.ldapdriver.rst
+ nova..auth.manager.rst
+ nova..auth.signer.rst
+ nova..cloudpipe.pipelib.rst
+ nova..compute.api.rst
+ nova..compute.instance_types.rst
+ nova..compute.manager.rst
+ nova..compute.monitor.rst
+ nova..compute.power_state.rst
+ nova..console.api.rst
+ nova..console.fake.rst
+ nova..console.manager.rst
+ nova..console.xvp.rst
+ nova..context.rst
+ nova..crypto.rst
+ nova..db.api.rst
+ nova..db.base.rst
+ nova..db.migration.rst
+ nova..db.sqlalchemy.api.rst
+ nova..db.sqlalchemy.migrate_repo.manage.rst
+ nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
+ nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
+ nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
+ nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
+ nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
+ nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
+ nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
+ nova..db.sqlalchemy.migration.rst
+ nova..db.sqlalchemy.models.rst
+ nova..db.sqlalchemy.session.rst
+ nova..exception.rst
+ nova..fakememcache.rst
+ nova..fakerabbit.rst
+ nova..flags.rst
+ nova..image.glance.rst
+ nova..image.local.rst
+ nova..image.s3.rst
+ nova..image.service.rst
+ nova..log.rst
+ nova..manager.rst
+ nova..network.api.rst
+ nova..network.linux_net.rst
+ nova..network.manager.rst
+ nova..objectstore.bucket.rst
+ nova..objectstore.handler.rst
+ nova..objectstore.image.rst
+ nova..objectstore.stored.rst
+ nova..quota.rst
+ nova..rpc.rst
+ nova..scheduler.chance.rst
+ nova..scheduler.driver.rst
+ nova..scheduler.manager.rst
+ nova..scheduler.simple.rst
+ nova..scheduler.zone.rst
+ nova..service.rst
+ nova..test.rst
+ nova..tests.api.openstack.fakes.rst
+ nova..tests.api.openstack.test_adminapi.rst
+ nova..tests.api.openstack.test_api.rst
+ nova..tests.api.openstack.test_auth.rst
+ nova..tests.api.openstack.test_common.rst
+ nova..tests.api.openstack.test_faults.rst
+ nova..tests.api.openstack.test_flavors.rst
+ nova..tests.api.openstack.test_images.rst
+ nova..tests.api.openstack.test_ratelimiting.rst
+ nova..tests.api.openstack.test_servers.rst
+ nova..tests.api.openstack.test_shared_ip_groups.rst
+ nova..tests.api.openstack.test_zones.rst
+ nova..tests.api.test_wsgi.rst
+ nova..tests.db.fakes.rst
+ nova..tests.declare_flags.rst
+ nova..tests.fake_flags.rst
+ nova..tests.glance.stubs.rst
+ nova..tests.hyperv_unittest.rst
+ nova..tests.objectstore_unittest.rst
+ nova..tests.real_flags.rst
+ nova..tests.runtime_flags.rst
+ nova..tests.test_access.rst
+ nova..tests.test_api.rst
+ nova..tests.test_auth.rst
+ nova..tests.test_cloud.rst
+ nova..tests.test_compute.rst
+ nova..tests.test_console.rst
+ nova..tests.test_direct.rst
+ nova..tests.test_flags.rst
+ nova..tests.test_instance_types.rst
+ nova..tests.test_localization.rst
+ nova..tests.test_log.rst
+ nova..tests.test_middleware.rst
+ nova..tests.test_misc.rst
+ nova..tests.test_network.rst
+ nova..tests.test_quota.rst
+ nova..tests.test_rpc.rst
+ nova..tests.test_scheduler.rst
+ nova..tests.test_service.rst
+ nova..tests.test_test.rst
+ nova..tests.test_twistd.rst
+ nova..tests.test_utils.rst
+ nova..tests.test_virt.rst
+ nova..tests.test_volume.rst
+ nova..tests.test_xenapi.rst
+ nova..tests.xenapi.stubs.rst
+ nova..twistd.rst
+ nova..utils.rst
+ nova..version.rst
+ nova..virt.connection.rst
+ nova..virt.disk.rst
+ nova..virt.fake.rst
+ nova..virt.hyperv.rst
+ nova..virt.images.rst
+ nova..virt.libvirt_conn.rst
+ nova..virt.xenapi.fake.rst
+ nova..virt.xenapi.network_utils.rst
+ nova..virt.xenapi.vm_utils.rst
+ nova..virt.xenapi.vmops.rst
+ nova..virt.xenapi.volume_utils.rst
+ nova..virt.xenapi.volumeops.rst
+ nova..virt.xenapi_conn.rst
+ nova..volume.api.rst
+ nova..volume.driver.rst
+ nova..volume.manager.rst
+ nova..volume.san.rst
+ nova..wsgi.rst
diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst
new file mode 100644
index 000000000..35fa839e1
--- /dev/null
+++ b/doc/source/api/nova..adminclient.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..adminclient` Module
+==============================================================================
+.. automodule:: nova..adminclient
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.direct.rst b/doc/source/api/nova..api.direct.rst
new file mode 100644
index 000000000..a1705c707
--- /dev/null
+++ b/doc/source/api/nova..api.direct.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.direct` Module
+==============================================================================
+.. automodule:: nova..api.direct
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst
new file mode 100644
index 000000000..4e9ab308b
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.admin.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.admin` Module
+==============================================================================
+.. automodule:: nova..api.ec2.admin
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst
new file mode 100644
index 000000000..c17a2ff3a
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.apirequest.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.apirequest` Module
+==============================================================================
+.. automodule:: nova..api.ec2.apirequest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst
new file mode 100644
index 000000000..f6145c217
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.cloud.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.cloud` Module
+==============================================================================
+.. automodule:: nova..api.ec2.cloud
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
new file mode 100644
index 000000000..75f5169e5
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.metadatarequesthandler` Module
+==============================================================================
+.. automodule:: nova..api.ec2.metadatarequesthandler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst
new file mode 100644
index 000000000..8c3f8f2da
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.auth.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.auth` Module
+==============================================================================
+.. automodule:: nova..api.openstack.auth
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst
new file mode 100644
index 000000000..6b406f12d
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.backup_schedules.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.backup_schedules` Module
+==============================================================================
+.. automodule:: nova..api.openstack.backup_schedules
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.common.rst b/doc/source/api/nova..api.openstack.common.rst
new file mode 100644
index 000000000..4fd734790
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.common.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.common` Module
+==============================================================================
+.. automodule:: nova..api.openstack.common
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.consoles.rst b/doc/source/api/nova..api.openstack.consoles.rst
new file mode 100644
index 000000000..1e3e09599
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.consoles.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.consoles` Module
+==============================================================================
+.. automodule:: nova..api.openstack.consoles
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst
new file mode 100644
index 000000000..7b25561f7
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.faults.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.faults` Module
+==============================================================================
+.. automodule:: nova..api.openstack.faults
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst
new file mode 100644
index 000000000..0deb724de
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.flavors.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.flavors` Module
+==============================================================================
+.. automodule:: nova..api.openstack.flavors
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst
new file mode 100644
index 000000000..82bd5f1e8
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.images.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.images` Module
+==============================================================================
+.. automodule:: nova..api.openstack.images
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst
new file mode 100644
index 000000000..c36856ea2
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.servers.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.servers` Module
+==============================================================================
+.. automodule:: nova..api.openstack.servers
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.shared_ip_groups.rst b/doc/source/api/nova..api.openstack.shared_ip_groups.rst
new file mode 100644
index 000000000..4b1f44efe
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.shared_ip_groups.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.shared_ip_groups` Module
+==============================================================================
+.. automodule:: nova..api.openstack.shared_ip_groups
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.zones.rst b/doc/source/api/nova..api.openstack.zones.rst
new file mode 100644
index 000000000..ebe4569c5
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.zones.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.zones` Module
+==============================================================================
+.. automodule:: nova..api.openstack.zones
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst
new file mode 100644
index 000000000..7de68b6e0
--- /dev/null
+++ b/doc/source/api/nova..auth.dbdriver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.dbdriver` Module
+==============================================================================
+.. automodule:: nova..auth.dbdriver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst
new file mode 100644
index 000000000..ca8a3ad4d
--- /dev/null
+++ b/doc/source/api/nova..auth.fakeldap.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.fakeldap` Module
+==============================================================================
+.. automodule:: nova..auth.fakeldap
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst
new file mode 100644
index 000000000..c44463522
--- /dev/null
+++ b/doc/source/api/nova..auth.ldapdriver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.ldapdriver` Module
+==============================================================================
+.. automodule:: nova..auth.ldapdriver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst
new file mode 100644
index 000000000..bc5ce2ec3
--- /dev/null
+++ b/doc/source/api/nova..auth.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.manager` Module
+==============================================================================
+.. automodule:: nova..auth.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst
new file mode 100644
index 000000000..aad824ead
--- /dev/null
+++ b/doc/source/api/nova..auth.signer.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.signer` Module
+==============================================================================
+.. automodule:: nova..auth.signer
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst
new file mode 100644
index 000000000..054aaf484
--- /dev/null
+++ b/doc/source/api/nova..cloudpipe.pipelib.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..cloudpipe.pipelib` Module
+==============================================================================
+.. automodule:: nova..cloudpipe.pipelib
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.api.rst b/doc/source/api/nova..compute.api.rst
new file mode 100644
index 000000000..caa66313a
--- /dev/null
+++ b/doc/source/api/nova..compute.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.api` Module
+==============================================================================
+.. automodule:: nova..compute.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst
new file mode 100644
index 000000000..d206ff3a4
--- /dev/null
+++ b/doc/source/api/nova..compute.instance_types.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.instance_types` Module
+==============================================================================
+.. automodule:: nova..compute.instance_types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst
new file mode 100644
index 000000000..33a337c39
--- /dev/null
+++ b/doc/source/api/nova..compute.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.manager` Module
+==============================================================================
+.. automodule:: nova..compute.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst
new file mode 100644
index 000000000..a91169ecd
--- /dev/null
+++ b/doc/source/api/nova..compute.monitor.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.monitor` Module
+==============================================================================
+.. automodule:: nova..compute.monitor
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst
new file mode 100644
index 000000000..41b1080e5
--- /dev/null
+++ b/doc/source/api/nova..compute.power_state.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.power_state` Module
+==============================================================================
+.. automodule:: nova..compute.power_state
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.api.rst b/doc/source/api/nova..console.api.rst
new file mode 100644
index 000000000..82a51d4c7
--- /dev/null
+++ b/doc/source/api/nova..console.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.api` Module
+==============================================================================
+.. automodule:: nova..console.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.fake.rst b/doc/source/api/nova..console.fake.rst
new file mode 100644
index 000000000..f053f85d6
--- /dev/null
+++ b/doc/source/api/nova..console.fake.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.fake` Module
+==============================================================================
+.. automodule:: nova..console.fake
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.manager.rst b/doc/source/api/nova..console.manager.rst
new file mode 100644
index 000000000..f9283a6c3
--- /dev/null
+++ b/doc/source/api/nova..console.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.manager` Module
+==============================================================================
+.. automodule:: nova..console.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.xvp.rst b/doc/source/api/nova..console.xvp.rst
new file mode 100644
index 000000000..a0887009e
--- /dev/null
+++ b/doc/source/api/nova..console.xvp.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.xvp` Module
+==============================================================================
+.. automodule:: nova..console.xvp
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst
new file mode 100644
index 000000000..9de1adb24
--- /dev/null
+++ b/doc/source/api/nova..context.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..context` Module
+==============================================================================
+.. automodule:: nova..context
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst
new file mode 100644
index 000000000..af9f63634
--- /dev/null
+++ b/doc/source/api/nova..crypto.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..crypto` Module
+==============================================================================
+.. automodule:: nova..crypto
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst
new file mode 100644
index 000000000..6d998fbb2
--- /dev/null
+++ b/doc/source/api/nova..db.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.api` Module
+==============================================================================
+.. automodule:: nova..db.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.base.rst b/doc/source/api/nova..db.base.rst
new file mode 100644
index 000000000..29fb417d6
--- /dev/null
+++ b/doc/source/api/nova..db.base.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.base` Module
+==============================================================================
+.. automodule:: nova..db.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.migration.rst b/doc/source/api/nova..db.migration.rst
new file mode 100644
index 000000000..71dfea301
--- /dev/null
+++ b/doc/source/api/nova..db.migration.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.migration` Module
+==============================================================================
+.. automodule:: nova..db.migration
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst
new file mode 100644
index 000000000..76d0c1bd3
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.api` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
new file mode 100644
index 000000000..93decfb27
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.manage` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.manage
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
new file mode 100644
index 000000000..4b1219edb
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.001_austin` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.001_austin
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
new file mode 100644
index 000000000..82f1f4680
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.002_bexar` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.002_bexar
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
new file mode 100644
index 000000000..98f3e8da7
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
new file mode 100644
index 000000000..5cbb81191
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
new file mode 100644
index 000000000..cef0c243e
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
new file mode 100644
index 000000000..a15697196
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
new file mode 100644
index 000000000..38842d1af
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migration.rst b/doc/source/api/nova..db.sqlalchemy.migration.rst
new file mode 100644
index 000000000..3a9b01b9a
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migration.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migration` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migration
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst
new file mode 100644
index 000000000..9c795d7f5
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.models.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.models` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.models
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst
new file mode 100644
index 000000000..cbfd6416a
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.session.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.session` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.session
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst
new file mode 100644
index 000000000..97ac6b752
--- /dev/null
+++ b/doc/source/api/nova..exception.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..exception` Module
+==============================================================================
+.. automodule:: nova..exception
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..fakememcache.rst b/doc/source/api/nova..fakememcache.rst
new file mode 100644
index 000000000..7e7ffb98b
--- /dev/null
+++ b/doc/source/api/nova..fakememcache.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..fakememcache` Module
+==============================================================================
+.. automodule:: nova..fakememcache
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst
new file mode 100644
index 000000000..f1e27c266
--- /dev/null
+++ b/doc/source/api/nova..fakerabbit.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..fakerabbit` Module
+==============================================================================
+.. automodule:: nova..fakerabbit
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst
new file mode 100644
index 000000000..08165be44
--- /dev/null
+++ b/doc/source/api/nova..flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..flags` Module
+==============================================================================
+.. automodule:: nova..flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.glance.rst b/doc/source/api/nova..image.glance.rst
new file mode 100644
index 000000000..b0882d5ec
--- /dev/null
+++ b/doc/source/api/nova..image.glance.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.glance` Module
+==============================================================================
+.. automodule:: nova..image.glance
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.local.rst b/doc/source/api/nova..image.local.rst
new file mode 100644
index 000000000..b6ad5470b
--- /dev/null
+++ b/doc/source/api/nova..image.local.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.local` Module
+==============================================================================
+.. automodule:: nova..image.local
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.s3.rst b/doc/source/api/nova..image.s3.rst
new file mode 100644
index 000000000..e5b236127
--- /dev/null
+++ b/doc/source/api/nova..image.s3.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.s3` Module
+==============================================================================
+.. automodule:: nova..image.s3
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst
new file mode 100644
index 000000000..78ef1ecca
--- /dev/null
+++ b/doc/source/api/nova..image.service.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.service` Module
+==============================================================================
+.. automodule:: nova..image.service
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..log.rst b/doc/source/api/nova..log.rst
new file mode 100644
index 000000000..ff209709f
--- /dev/null
+++ b/doc/source/api/nova..log.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..log` Module
+==============================================================================
+.. automodule:: nova..log
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst
new file mode 100644
index 000000000..576902491
--- /dev/null
+++ b/doc/source/api/nova..manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..manager` Module
+==============================================================================
+.. automodule:: nova..manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..network.api.rst b/doc/source/api/nova..network.api.rst
new file mode 100644
index 000000000..b63be2ba3
--- /dev/null
+++ b/doc/source/api/nova..network.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..network.api` Module
+==============================================================================
+.. automodule:: nova..network.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst
new file mode 100644
index 000000000..7af78d5ad
--- /dev/null
+++ b/doc/source/api/nova..network.linux_net.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..network.linux_net` Module
+==============================================================================
+.. automodule:: nova..network.linux_net
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst
new file mode 100644
index 000000000..0ea705533
--- /dev/null
+++ b/doc/source/api/nova..network.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..network.manager` Module
+==============================================================================
+.. automodule:: nova..network.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst
new file mode 100644
index 000000000..3bfdf639c
--- /dev/null
+++ b/doc/source/api/nova..objectstore.bucket.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.bucket` Module
+==============================================================================
+.. automodule:: nova..objectstore.bucket
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst
new file mode 100644
index 000000000..0eb8c4efb
--- /dev/null
+++ b/doc/source/api/nova..objectstore.handler.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.handler` Module
+==============================================================================
+.. automodule:: nova..objectstore.handler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst
new file mode 100644
index 000000000..fa4c971f1
--- /dev/null
+++ b/doc/source/api/nova..objectstore.image.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.image` Module
+==============================================================================
+.. automodule:: nova..objectstore.image
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst
new file mode 100644
index 000000000..2b1d997a3
--- /dev/null
+++ b/doc/source/api/nova..objectstore.stored.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.stored` Module
+==============================================================================
+.. automodule:: nova..objectstore.stored
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst
new file mode 100644
index 000000000..4140d95d6
--- /dev/null
+++ b/doc/source/api/nova..quota.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..quota` Module
+==============================================================================
+.. automodule:: nova..quota
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst
new file mode 100644
index 000000000..5b2a9b8e2
--- /dev/null
+++ b/doc/source/api/nova..rpc.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..rpc` Module
+==============================================================================
+.. automodule:: nova..rpc
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst
new file mode 100644
index 000000000..89c074c8f
--- /dev/null
+++ b/doc/source/api/nova..scheduler.chance.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.chance` Module
+==============================================================================
+.. automodule:: nova..scheduler.chance
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst
new file mode 100644
index 000000000..793ed9c7b
--- /dev/null
+++ b/doc/source/api/nova..scheduler.driver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.driver` Module
+==============================================================================
+.. automodule:: nova..scheduler.driver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst
new file mode 100644
index 000000000..d0fc7c423
--- /dev/null
+++ b/doc/source/api/nova..scheduler.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.manager` Module
+==============================================================================
+.. automodule:: nova..scheduler.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst
new file mode 100644
index 000000000..dacc2cf30
--- /dev/null
+++ b/doc/source/api/nova..scheduler.simple.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.simple` Module
+==============================================================================
+.. automodule:: nova..scheduler.simple
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.zone.rst b/doc/source/api/nova..scheduler.zone.rst
new file mode 100644
index 000000000..54c4bf201
--- /dev/null
+++ b/doc/source/api/nova..scheduler.zone.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.zone` Module
+==============================================================================
+.. automodule:: nova..scheduler.zone
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst
new file mode 100644
index 000000000..2d2dfcf2e
--- /dev/null
+++ b/doc/source/api/nova..service.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..service` Module
+==============================================================================
+.. automodule:: nova..service
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst
new file mode 100644
index 000000000..a6bdb6f1f
--- /dev/null
+++ b/doc/source/api/nova..test.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..test` Module
+==============================================================================
+.. automodule:: nova..test
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst
new file mode 100644
index 000000000..4a9ff5938
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.fakes.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.fakes` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.fakes
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_adminapi.rst b/doc/source/api/nova..tests.api.openstack.test_adminapi.rst
new file mode 100644
index 000000000..19a85ca0f
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_adminapi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_adminapi` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_adminapi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst
new file mode 100644
index 000000000..68106d221
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_api` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst
new file mode 100644
index 000000000..9f0011669
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_auth.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_auth` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_auth
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_common.rst b/doc/source/api/nova..tests.api.openstack.test_common.rst
new file mode 100644
index 000000000..82f40ecb8
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_common.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_common` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_common
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst
new file mode 100644
index 000000000..b839ae8a3
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_faults.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_faults` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_faults
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst
new file mode 100644
index 000000000..471fac56e
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_flavors.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_flavors` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_flavors
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst
new file mode 100644
index 000000000..57ae93c8c
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_images.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_images` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_images
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
new file mode 100644
index 000000000..9a857f795
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_ratelimiting` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_ratelimiting
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst
new file mode 100644
index 000000000..ea602e6ab
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_servers.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_servers` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_servers
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst b/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
new file mode 100644
index 000000000..48814af00
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_shared_ip_groups` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_shared_ip_groups
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_zones.rst b/doc/source/api/nova..tests.api.openstack.test_zones.rst
new file mode 100644
index 000000000..ba7078e63
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_zones.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_zones` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_zones
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst
new file mode 100644
index 000000000..8e79caa4d
--- /dev/null
+++ b/doc/source/api/nova..tests.api.test_wsgi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.test_wsgi` Module
+==============================================================================
+.. automodule:: nova..tests.api.test_wsgi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.db.fakes.rst b/doc/source/api/nova..tests.db.fakes.rst
new file mode 100644
index 000000000..cc79e55e2
--- /dev/null
+++ b/doc/source/api/nova..tests.db.fakes.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.db.fakes` Module
+==============================================================================
+.. automodule:: nova..tests.db.fakes
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst
new file mode 100644
index 000000000..524e72e91
--- /dev/null
+++ b/doc/source/api/nova..tests.declare_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.declare_flags` Module
+==============================================================================
+.. automodule:: nova..tests.declare_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst
new file mode 100644
index 000000000..a8dc3df36
--- /dev/null
+++ b/doc/source/api/nova..tests.fake_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.fake_flags` Module
+==============================================================================
+.. automodule:: nova..tests.fake_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.glance.stubs.rst b/doc/source/api/nova..tests.glance.stubs.rst
new file mode 100644
index 000000000..7ef5fccbe
--- /dev/null
+++ b/doc/source/api/nova..tests.glance.stubs.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.glance.stubs` Module
+==============================================================================
+.. automodule:: nova..tests.glance.stubs
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.hyperv_unittest.rst b/doc/source/api/nova..tests.hyperv_unittest.rst
new file mode 100644
index 000000000..c08443121
--- /dev/null
+++ b/doc/source/api/nova..tests.hyperv_unittest.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.hyperv_unittest` Module
+==============================================================================
+.. automodule:: nova..tests.hyperv_unittest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst
new file mode 100644
index 000000000..0ae252f04
--- /dev/null
+++ b/doc/source/api/nova..tests.objectstore_unittest.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.objectstore_unittest` Module
+==============================================================================
+.. automodule:: nova..tests.objectstore_unittest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst
new file mode 100644
index 000000000..e9c0d1abd
--- /dev/null
+++ b/doc/source/api/nova..tests.real_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.real_flags` Module
+==============================================================================
+.. automodule:: nova..tests.real_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst
new file mode 100644
index 000000000..984e21199
--- /dev/null
+++ b/doc/source/api/nova..tests.runtime_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.runtime_flags` Module
+==============================================================================
+.. automodule:: nova..tests.runtime_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_access.rst b/doc/source/api/nova..tests.test_access.rst
new file mode 100644
index 000000000..300d8109e
--- /dev/null
+++ b/doc/source/api/nova..tests.test_access.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_access` Module
+==============================================================================
+.. automodule:: nova..tests.test_access
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_api.rst b/doc/source/api/nova..tests.test_api.rst
new file mode 100644
index 000000000..f9473062e
--- /dev/null
+++ b/doc/source/api/nova..tests.test_api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_api` Module
+==============================================================================
+.. automodule:: nova..tests.test_api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_auth.rst b/doc/source/api/nova..tests.test_auth.rst
new file mode 100644
index 000000000..ff4445ae4
--- /dev/null
+++ b/doc/source/api/nova..tests.test_auth.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_auth` Module
+==============================================================================
+.. automodule:: nova..tests.test_auth
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_cloud.rst b/doc/source/api/nova..tests.test_cloud.rst
new file mode 100644
index 000000000..7bd03db9a
--- /dev/null
+++ b/doc/source/api/nova..tests.test_cloud.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_cloud` Module
+==============================================================================
+.. automodule:: nova..tests.test_cloud
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_compute.rst b/doc/source/api/nova..tests.test_compute.rst
new file mode 100644
index 000000000..90fd6e9d1
--- /dev/null
+++ b/doc/source/api/nova..tests.test_compute.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_compute` Module
+==============================================================================
+.. automodule:: nova..tests.test_compute
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_console.rst b/doc/source/api/nova..tests.test_console.rst
new file mode 100644
index 000000000..f695f5d17
--- /dev/null
+++ b/doc/source/api/nova..tests.test_console.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_console` Module
+==============================================================================
+.. automodule:: nova..tests.test_console
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_direct.rst b/doc/source/api/nova..tests.test_direct.rst
new file mode 100644
index 000000000..4f7adef19
--- /dev/null
+++ b/doc/source/api/nova..tests.test_direct.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_direct` Module
+==============================================================================
+.. automodule:: nova..tests.test_direct
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_flags.rst b/doc/source/api/nova..tests.test_flags.rst
new file mode 100644
index 000000000..2ec35d6c2
--- /dev/null
+++ b/doc/source/api/nova..tests.test_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_flags` Module
+==============================================================================
+.. automodule:: nova..tests.test_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_instance_types.rst b/doc/source/api/nova..tests.test_instance_types.rst
new file mode 100644
index 000000000..ebe689966
--- /dev/null
+++ b/doc/source/api/nova..tests.test_instance_types.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_instance_types` Module
+==============================================================================
+.. automodule:: nova..tests.test_instance_types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_localization.rst b/doc/source/api/nova..tests.test_localization.rst
new file mode 100644
index 000000000..d93c83ba7
--- /dev/null
+++ b/doc/source/api/nova..tests.test_localization.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_localization` Module
+==============================================================================
+.. automodule:: nova..tests.test_localization
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_log.rst b/doc/source/api/nova..tests.test_log.rst
new file mode 100644
index 000000000..04ff5ead1
--- /dev/null
+++ b/doc/source/api/nova..tests.test_log.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_log` Module
+==============================================================================
+.. automodule:: nova..tests.test_log
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_middleware.rst b/doc/source/api/nova..tests.test_middleware.rst
new file mode 100644
index 000000000..2f9df5832
--- /dev/null
+++ b/doc/source/api/nova..tests.test_middleware.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_middleware` Module
+==============================================================================
+.. automodule:: nova..tests.test_middleware
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_misc.rst b/doc/source/api/nova..tests.test_misc.rst
new file mode 100644
index 000000000..4975f89d7
--- /dev/null
+++ b/doc/source/api/nova..tests.test_misc.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_misc` Module
+==============================================================================
+.. automodule:: nova..tests.test_misc
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_network.rst b/doc/source/api/nova..tests.test_network.rst
new file mode 100644
index 000000000..3a4b04ea4
--- /dev/null
+++ b/doc/source/api/nova..tests.test_network.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_network` Module
+==============================================================================
+.. automodule:: nova..tests.test_network
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_quota.rst b/doc/source/api/nova..tests.test_quota.rst
new file mode 100644
index 000000000..24ebf9ca3
--- /dev/null
+++ b/doc/source/api/nova..tests.test_quota.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_quota` Module
+==============================================================================
+.. automodule:: nova..tests.test_quota
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_rpc.rst b/doc/source/api/nova..tests.test_rpc.rst
new file mode 100644
index 000000000..c141d6889
--- /dev/null
+++ b/doc/source/api/nova..tests.test_rpc.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_rpc` Module
+==============================================================================
+.. automodule:: nova..tests.test_rpc
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_scheduler.rst b/doc/source/api/nova..tests.test_scheduler.rst
new file mode 100644
index 000000000..1cd9991db
--- /dev/null
+++ b/doc/source/api/nova..tests.test_scheduler.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_scheduler` Module
+==============================================================================
+.. automodule:: nova..tests.test_scheduler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_service.rst b/doc/source/api/nova..tests.test_service.rst
new file mode 100644
index 000000000..a264fbb55
--- /dev/null
+++ b/doc/source/api/nova..tests.test_service.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_service` Module
+==============================================================================
+.. automodule:: nova..tests.test_service
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_test.rst b/doc/source/api/nova..tests.test_test.rst
new file mode 100644
index 000000000..389eb3c99
--- /dev/null
+++ b/doc/source/api/nova..tests.test_test.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_test` Module
+==============================================================================
+.. automodule:: nova..tests.test_test
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_twistd.rst b/doc/source/api/nova..tests.test_twistd.rst
new file mode 100644
index 000000000..cae0c0a28
--- /dev/null
+++ b/doc/source/api/nova..tests.test_twistd.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_twistd` Module
+==============================================================================
+.. automodule:: nova..tests.test_twistd
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_utils.rst b/doc/source/api/nova..tests.test_utils.rst
new file mode 100644
index 000000000..d61a7021f
--- /dev/null
+++ b/doc/source/api/nova..tests.test_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_utils` Module
+==============================================================================
+.. automodule:: nova..tests.test_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_virt.rst b/doc/source/api/nova..tests.test_virt.rst
new file mode 100644
index 000000000..9b0dc1e46
--- /dev/null
+++ b/doc/source/api/nova..tests.test_virt.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_virt` Module
+==============================================================================
+.. automodule:: nova..tests.test_virt
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_volume.rst b/doc/source/api/nova..tests.test_volume.rst
new file mode 100644
index 000000000..b5affe53c
--- /dev/null
+++ b/doc/source/api/nova..tests.test_volume.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_volume` Module
+==============================================================================
+.. automodule:: nova..tests.test_volume
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_xenapi.rst b/doc/source/api/nova..tests.test_xenapi.rst
new file mode 100644
index 000000000..7128baee4
--- /dev/null
+++ b/doc/source/api/nova..tests.test_xenapi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_xenapi` Module
+==============================================================================
+.. automodule:: nova..tests.test_xenapi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.xenapi.stubs.rst b/doc/source/api/nova..tests.xenapi.stubs.rst
new file mode 100644
index 000000000..356eed9a7
--- /dev/null
+++ b/doc/source/api/nova..tests.xenapi.stubs.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.xenapi.stubs` Module
+==============================================================================
+.. automodule:: nova..tests.xenapi.stubs
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst
new file mode 100644
index 000000000..d4145396d
--- /dev/null
+++ b/doc/source/api/nova..twistd.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..twistd` Module
+==============================================================================
+.. automodule:: nova..twistd
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst
new file mode 100644
index 000000000..1131d1080
--- /dev/null
+++ b/doc/source/api/nova..utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..utils` Module
+==============================================================================
+.. automodule:: nova..utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..version.rst b/doc/source/api/nova..version.rst
new file mode 100644
index 000000000..4b0fc078f
--- /dev/null
+++ b/doc/source/api/nova..version.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..version` Module
+==============================================================================
+.. automodule:: nova..version
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst
new file mode 100644
index 000000000..caf766765
--- /dev/null
+++ b/doc/source/api/nova..virt.connection.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.connection` Module
+==============================================================================
+.. automodule:: nova..virt.connection
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.disk.rst b/doc/source/api/nova..virt.disk.rst
new file mode 100644
index 000000000..4a6c0f406
--- /dev/null
+++ b/doc/source/api/nova..virt.disk.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.disk` Module
+==============================================================================
+.. automodule:: nova..virt.disk
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst
new file mode 100644
index 000000000..06ecdbf7d
--- /dev/null
+++ b/doc/source/api/nova..virt.fake.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.fake` Module
+==============================================================================
+.. automodule:: nova..virt.fake
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.hyperv.rst b/doc/source/api/nova..virt.hyperv.rst
new file mode 100644
index 000000000..48d89378e
--- /dev/null
+++ b/doc/source/api/nova..virt.hyperv.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.hyperv` Module
+==============================================================================
+.. automodule:: nova..virt.hyperv
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst
new file mode 100644
index 000000000..4fdeb7af8
--- /dev/null
+++ b/doc/source/api/nova..virt.images.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.images` Module
+==============================================================================
+.. automodule:: nova..virt.images
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst
new file mode 100644
index 000000000..7fb8aed5f
--- /dev/null
+++ b/doc/source/api/nova..virt.libvirt_conn.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.libvirt_conn` Module
+==============================================================================
+.. automodule:: nova..virt.libvirt_conn
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.fake.rst b/doc/source/api/nova..virt.xenapi.fake.rst
new file mode 100644
index 000000000..752dabb14
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.fake.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.fake` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.fake
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.network_utils.rst b/doc/source/api/nova..virt.xenapi.network_utils.rst
new file mode 100644
index 000000000..15f52973e
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.network_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.network_utils` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.network_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.vm_utils.rst b/doc/source/api/nova..virt.xenapi.vm_utils.rst
new file mode 100644
index 000000000..18745dc71
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.vm_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.vm_utils` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.vm_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.vmops.rst b/doc/source/api/nova..virt.xenapi.vmops.rst
new file mode 100644
index 000000000..30662c58d
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.vmops.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.vmops` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.vmops
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.volume_utils.rst b/doc/source/api/nova..virt.xenapi.volume_utils.rst
new file mode 100644
index 000000000..413e4dc4b
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.volume_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.volume_utils` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.volume_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.volumeops.rst b/doc/source/api/nova..virt.xenapi.volumeops.rst
new file mode 100644
index 000000000..626f164df
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.volumeops.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.volumeops` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.volumeops
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi_conn.rst b/doc/source/api/nova..virt.xenapi_conn.rst
new file mode 100644
index 000000000..14ac5147f
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi_conn.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi_conn` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi_conn
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.api.rst b/doc/source/api/nova..volume.api.rst
new file mode 100644
index 000000000..8ad36e049
--- /dev/null
+++ b/doc/source/api/nova..volume.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.api` Module
+==============================================================================
+.. automodule:: nova..volume.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst
new file mode 100644
index 000000000..51f5c0729
--- /dev/null
+++ b/doc/source/api/nova..volume.driver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.driver` Module
+==============================================================================
+.. automodule:: nova..volume.driver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst
new file mode 100644
index 000000000..91a192a8f
--- /dev/null
+++ b/doc/source/api/nova..volume.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.manager` Module
+==============================================================================
+.. automodule:: nova..volume.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.san.rst b/doc/source/api/nova..volume.san.rst
new file mode 100644
index 000000000..1de068928
--- /dev/null
+++ b/doc/source/api/nova..volume.san.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.san` Module
+==============================================================================
+.. automodule:: nova..volume.san
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst
new file mode 100644
index 000000000..0bff1c332
--- /dev/null
+++ b/doc/source/api/nova..wsgi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..wsgi` Module
+==============================================================================
+.. automodule:: nova..wsgi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/community.rst b/doc/source/community.rst
index 4ae32f1eb..e925a47bd 100644
--- a/doc/source/community.rst
+++ b/doc/source/community.rst
@@ -18,7 +18,7 @@
Getting Involved
================
-The Nova community is a very friendly group and there are places online to join in with the
+The OpenStack community for Nova is a very friendly group and there are places online to join in with the
community. Feel free to ask questions. This document points you to some of the places where you can
communicate with people.
@@ -83,3 +83,13 @@ Twitter
Because all the cool kids do it: `@openstack <http://twitter.com/openstack>`_. Also follow the
`#openstack <http://search.twitter.com/search?q=%23openstack>`_ tag for relevant tweets.
+
+OpenStack Docs Site
+-------------------
+
+The `nova.openstack.org <http://nova.openstack.org>`_ site is geared towards developer documentation,
+and the `docs.openstack.org <http://docs.openstack.org>`_ site is intended for cloud administrators
+who are standing up and running OpenStack Compute in production. You can contribute to the Docs Site
+by using bzr and Launchpad to work on the openstack-manuals project at http://launchpad.net/openstack-manuals.
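+
+For example, a typical first step is to grab a local branch of the manuals with bzr (assuming the standard Launchpad branch name)::
+
+  bzr branch lp:openstack-manuals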
+
+
diff --git a/doc/source/images/vmwareapi_blockdiagram.jpg b/doc/source/images/vmwareapi_blockdiagram.jpg
new file mode 100644
index 000000000..1ae1fc8e0
--- /dev/null
+++ b/doc/source/images/vmwareapi_blockdiagram.jpg
Binary files differ
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d337fb69f..846d3cfcd 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -32,11 +32,13 @@ Nova is written with the following design guidelines in mind:
* **API Compatibility**: Nova strives to provide API compatibility with popular systems like Amazon EC2.
This documentation is generated by the Sphinx toolkit and lives in the source
-tree. Additional documentation on Nova and other components of OpenStack can
-be found on the `OpenStack wiki`_. Also see the :doc:`community` page for
-other ways to interact with the community.
+tree. Additional draft and project documentation on Nova and other components of OpenStack can
+be found on the `OpenStack wiki`_. Cloud administrators should refer to `docs.openstack.org`_.
+
+Also see the :doc:`community` page for other ways to interact with the community.
.. _`OpenStack wiki`: http://wiki.openstack.org
+.. _`docs.openstack.org`: http://docs.openstack.org
Key Concepts
@@ -50,17 +52,7 @@ Key Concepts
service.architecture
nova.object.model
swift.object.model
-
-Administrator's Documentation
-=============================
-
-.. toctree::
- :maxdepth: 1
-
- livecd
- adminguide/index
- adminguide/single.node.install
- adminguide/multi.node.install
+ runnova/index
Developer Docs
==============
diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst
index bb9d7a7fe..1d8446f08 100644
--- a/doc/source/man/novamanage.rst
+++ b/doc/source/man/novamanage.rst
@@ -173,12 +173,72 @@ Nova Floating IPs
``nova-manage floating create <host> <ip_range>``
Creates floating IP addresses for the named host by the given range.
- floating delete <ip_range> Deletes floating IP addresses in the range given.
+
+``nova-manage floating delete <ip_range>``
+
+ Deletes floating IP addresses in the range given.
``nova-manage floating list``
Displays a list of all floating IP addresses.
+Nova Flavor
+~~~~~~~~~~~
+
+``nova-manage flavor list``
+
+ Outputs a list of all active flavors to the screen.
+
+``nova-manage flavor list --all``
+
+ Outputs a list of all flavors (active and inactive) to the screen.
+
+``nova-manage flavor create <name> <memory> <vCPU> <local_storage> <flavorID> <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>``
+
+ Creates a flavor with the following positional arguments (an example invocation follows the list):
+ * memory (expressed in megabytes)
+ * vcpu(s) (integer)
+ * local storage (expressed in gigabytes)
+ * flavorid (unique integer)
+ * swap space (expressed in megabytes, defaults to zero, optional)
+ * RXTX quotas (expressed in gigabytes, defaults to zero, optional)
+ * RXTX cap (expressed in gigabytes, defaults to zero, optional)
+
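+ For example, the following invocation creates a flavor named m1.xxlarge; the values mirror the example in the Managing Instance Types and Flavors guide and are purely illustrative::
+
+   nova-manage flavor create m1.xxlarge 32768 16 320 0 0 0
+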
+``nova-manage flavor delete <name>``
+
+ Deletes the flavor with the name <name>. This marks the flavor as inactive so it cannot be launched. However, the record stays in the database for archival and billing purposes.
+
+``nova-manage flavor delete <name> --purge``
+
+ Purges the flavor with the name <name>. This removes the flavor record from the database entirely.
+
+Nova Instance_type
+~~~~~~~~~~~~~~~~~~
+
+The instance_type command is provided as an alias for the flavor command. All the same subcommands and arguments from nova-manage flavor can be used.
+
+Nova Images
+~~~~~~~~~~~
+
+``nova-manage image image_register <path> <owner>``
+
+ Registers an image with the image service.
+
+``nova-manage image kernel_register <path> <owner>``
+
+ Registers a kernel with the image service.
+
+``nova-manage image ramdisk_register <path> <owner>``
+
+ Registers a ramdisk with the image service.
+
+``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``
+
+ Registers an image, kernel and ramdisk with the image service.
+
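+ For example, a hypothetical invocation (the paths and the owner are placeholders)::
+
+   nova-manage image all_register /tmp/machine.img /tmp/kernel /tmp/ramdisk admin
+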
+``nova-manage image convert <directory>``
+
+ Converts all images in directory from the old (Bexar) format to the new format.
FILES
========
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index e9687dc98..45cc4b879 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -64,6 +64,11 @@ Concept: Instances
An 'instance' is a word for a virtual machine that runs inside the cloud.
+Concept: Instance Type
+----------------------
+
+An 'instance type' describes the compute, memory and storage capacity of nova computing instances. In layman's terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching.
+
Concept: System Architecture
----------------------------
diff --git a/doc/source/object.model.rst b/doc/source/object.model.rst
index d02f151fd..419e89b0c 100644
--- a/doc/source/object.model.rst
+++ b/doc/source/object.model.rst
@@ -18,8 +18,6 @@
Object Model
============
-.. todo:: Add brief description for core models
-
.. graphviz::
digraph foo {
@@ -42,27 +40,27 @@ Object Model
Users
-----
-Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/adminguide/managing.users`.
+Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/runnova/managing.users`.
Projects
--------
-For Nova, access to images is based on the project. Read more at :doc:`/adminguide/managing.projects`.
+For Nova, access to images is based on the project. Read more at :doc:`/runnova/managing.projects`.
Images
------
-Images are binary files that run the operating system. Read more at :doc:`/adminguide/managing.images`.
+Images are binary files that run the operating system. Read more at :doc:`/runnova/managing.images`.
Instances
---------
-Instances are running virtual servers. Read more at :doc:`/adminguide/managing.instances`.
+Instances are running virtual servers. Read more at :doc:`/runnova/managing.instances`.
Volumes
-------
-.. todo:: Write doc about volumes
+Volumes offer extra block level storage to instances. Read more at `Managing Volumes <http://docs.openstack.org/openstack-compute/admin/content/ch05s07.html>`_.
Security Groups
---------------
@@ -72,7 +70,7 @@ In Nova, a security group is a named collection of network access rules, like fi
VLANs
-----
-VLAN is the default network mode for Nova. Read more at :doc:`/adminguide/network.vlan`.
+VLAN is the default network mode for Nova. Read more at :doc:`/runnova/network.vlan`.
IP Addresses
------------
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index 17c9e10a8..84ed3fe01 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -54,7 +54,7 @@ Environment Variables
By tweaking the environment that nova.sh runs in, you can build slightly
different configurations (though for more complex setups you should see
-:doc:`/adminguide/getting.started` and :doc:`/adminguide/multi.node.install`).
+`Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_).
* HOST_IP
* Default: address of first interface from the ifconfig command
diff --git a/doc/source/adminguide/binaries.rst b/doc/source/runnova/binaries.rst
index 5c50a51f1..023831021 100644
--- a/doc/source/adminguide/binaries.rst
+++ b/doc/source/runnova/binaries.rst
@@ -35,12 +35,12 @@ Nova api receives xml requests and sends them to the rest of the system. It is
nova-objectstore
----------------
-Nova objectstore is an ultra simple file-based storage system for images that replicates most of the S3 Api. It will soon be replaced with glance and a simple image manager.
+Nova objectstore is an ultra-simple file-based storage system for images that replicates most of the S3 API. It will soon be replaced with Glance (http://glance.openstack.org) and a simple image manager.
nova-compute
------------
-Nova compute is responsible for managing virtual machines. It loads a Service object which exposes the public methods on ComputeManager via rpc.
+Nova compute is responsible for managing virtual machines. It loads a Service object which exposes the public methods on ComputeManager via rpc.
nova-volume
-----------
diff --git a/doc/source/adminguide/euca2ools.rst b/doc/source/runnova/euca2ools.rst
index 6f0c57358..6f0c57358 100644
--- a/doc/source/adminguide/euca2ools.rst
+++ b/doc/source/runnova/euca2ools.rst
diff --git a/doc/source/runnova/flags.rst b/doc/source/runnova/flags.rst
new file mode 100644
index 000000000..1bfa022d9
--- /dev/null
+++ b/doc/source/runnova/flags.rst
@@ -0,0 +1,193 @@
+..
+ Copyright 2010-2011 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Flags and Flagfiles
+===================
+
+Nova uses a configuration file containing flags located in /etc/nova/nova.conf. You can get the most recent listing of available flags by running nova-<servicename> --help, for example, nova-api --help.
+
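+A nova.conf file is simply a list of flags, one per line. A minimal illustrative example (the values are placeholders, not recommended settings)::
+
+    --verbose
+    --network_manager=nova.network.manager.FlatManager
+    --sql_connection=mysql://root:nova@localhost/nova
+    --s3_host=192.168.1.10
+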
+Here's a list of available flags and their default settings.
+
+ --ajax_console_proxy_port: port that ajax_console_proxy binds
+ (default: '8000')
+ --ajax_console_proxy_topic: the topic ajax proxy nodes listen on
+ (default: 'ajax_proxy')
+ --ajax_console_proxy_url: location of ajax console proxy, in the form
+ "http://127.0.0.1:8000"
+ (default: 'http://127.0.0.1:8000')
+ --auth_token_ttl: Seconds for auth tokens to linger
+ (default: '3600')
+ (an integer)
+ --aws_access_key_id: AWS Access ID
+ (default: 'admin')
+ --aws_secret_access_key: AWS Access Key
+ (default: 'admin')
+ --compute_manager: Manager for compute
+ (default: 'nova.compute.manager.ComputeManager')
+ --compute_topic: the topic compute nodes listen on
+ (default: 'compute')
+ --connection_type: libvirt, xenapi or fake
+ (default: 'libvirt')
+ --console_manager: Manager for console proxy
+ (default: 'nova.console.manager.ConsoleProxyManager')
+ --console_topic: the topic console proxy nodes listen on
+ (default: 'console')
+ --control_exchange: the main exchange to connect to
+ (default: 'nova')
+ --db_backend: The backend to use for db
+ (default: 'sqlalchemy')
+ --default_image: default image to use, testing only
+ (default: 'ami-11111')
+ --default_instance_type: default instance type to use, testing only
+ (default: 'm1.small')
+ --default_log_levels: list of logger=LEVEL pairs
+ (default: 'amqplib=WARN,sqlalchemy=WARN,eventlet.wsgi.server=WARN')
+ (a comma separated list)
+ --default_project: default project for openstack
+ (default: 'openstack')
+ --ec2_dmz_host: internal ip of api server
+ (default: '$my_ip')
+ --ec2_host: ip of api server
+ (default: '$my_ip')
+ --ec2_path: suffix for ec2
+ (default: '/services/Cloud')
+ --ec2_port: cloud controller port
+ (default: '8773')
+ (an integer)
+ --ec2_scheme: prefix for ec2
+ (default: 'http')
+ --[no]enable_new_services: Services to be added to the available pool on
+ create
+ (default: 'true')
+ --[no]fake_network: should we use fake network devices and addresses
+ (default: 'false')
+ --[no]fake_rabbit: use a fake rabbit
+ (default: 'false')
+ --glance_host: glance host
+ (default: '$my_ip')
+ --glance_port: glance port
+ (default: '9292')
+ (an integer)
+ -?,--[no]help: show this help
+ --[no]helpshort: show usage only for this module
+ --[no]helpxml: like --help, but generates XML output
+ --host: name of this node
+ (default: 'osdemo03')
+ --image_service: The service to use for retrieving and searching for images.
+ (default: 'nova.image.s3.S3ImageService')
+ --instance_name_template: Template string to be used to generate instance
+ names
+ (default: 'instance-%08x')
+ --logfile: output to named file
+ --logging_context_format_string: format string to use for log messages with
+ context
+ (default: '%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s
+ %(project)s] %(message)s')
+ --logging_debug_format_suffix: data to append to log format when level is
+ DEBUG
+ (default: 'from %(processName)s (pid=%(process)d) %(funcName)s
+ %(pathname)s:%(lineno)d')
+ --logging_default_format_string: format string to use for log messages without
+ context
+ (default: '%(asctime)s %(levelname)s %(name)s [-] %(message)s')
+ --logging_exception_prefix: prefix each line of exception output with this
+ format
+ (default: '(%(name)s): TRACE: ')
+ --my_ip: host ip address
+ (default: '184.106.73.68')
+ --network_manager: Manager for network
+ (default: 'nova.network.manager.VlanManager')
+ --network_topic: the topic network nodes listen on
+ (default: 'network')
+ --node_availability_zone: availability zone of this node
+ (default: 'nova')
+ --null_kernel: kernel image that indicates not to use a kernel, but to use a
+ raw disk image instead
+ (default: 'nokernel')
+ --osapi_host: ip of api server
+ (default: '$my_ip')
+ --osapi_path: suffix for openstack
+ (default: '/v1.0/')
+ --osapi_port: OpenStack API port
+ (default: '8774')
+ (an integer)
+ --osapi_scheme: prefix for openstack
+ (default: 'http')
+ --periodic_interval: seconds between running periodic tasks
+ (default: '60')
+ (a positive integer)
+ --pidfile: pidfile to use for this service
+ --rabbit_host: rabbit host
+ (default: 'localhost')
+ --rabbit_max_retries: rabbit connection attempts
+ (default: '12')
+ (an integer)
+ --rabbit_password: rabbit password
+ (default: 'guest')
+ --rabbit_port: rabbit port
+ (default: '5672')
+ (an integer)
+ --rabbit_retry_interval: rabbit connection retry interval
+ (default: '10')
+ (an integer)
+ --rabbit_userid: rabbit userid
+ (default: 'guest')
+ --rabbit_virtual_host: rabbit virtual host
+ (default: '/')
+ --region_list: list of region=fqdn pairs separated by commas
+ (default: '')
+ (a comma separated list)
+ --report_interval: seconds between nodes reporting state to datastore
+ (default: '10')
+ (a positive integer)
+ --s3_dmz: s3 dmz ip (for instances)
+ (default: '$my_ip')
+ --s3_host: s3 host (for infrastructure)
+ (default: '$my_ip')
+ --s3_port: s3 port
+ (default: '3333')
+ (an integer)
+ --scheduler_manager: Manager for scheduler
+ (default: 'nova.scheduler.manager.SchedulerManager')
+ --scheduler_topic: the topic scheduler nodes listen on
+ (default: 'scheduler')
+ --sql_connection: connection string for sql database
+ (default: 'sqlite:///$state_path/nova.sqlite')
+ --sql_idle_timeout: timeout for idle sql database connections
+ (default: '3600')
+ --sql_max_retries: sql connection attempts
+ (default: '12')
+ (an integer)
+ --sql_retry_interval: sql connection retry interval
+ (default: '10')
+ (an integer)
+ --state_path: Top-level directory for maintaining nova's state
+ (default: '/usr/lib/pymodules/python2.6/nova/../')
+ --[no]use_syslog: output to syslog
+ (default: 'false')
+ --[no]verbose: show debug output
+ (default: 'false')
+ --volume_manager: Manager for volume
+ (default: 'nova.volume.manager.VolumeManager')
+ --volume_name_template: Template string to be used to generate instance names
+ (default: 'volume-%08x')
+ --volume_topic: the topic volume nodes listen on
+ (default: 'volume')
+ --vpn_image_id: AMI for cloudpipe vpn server
+ (default: 'ami-cloudpipe')
+ --vpn_key_suffix: Suffix to add to project name for vpn key and secgroups
+ (default: '-vpn') \ No newline at end of file
diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/runnova/getting.started.rst
index 675d8e664..4cc7307b0 100644
--- a/doc/source/adminguide/getting.started.rst
+++ b/doc/source/runnova/getting.started.rst
@@ -105,11 +105,10 @@ Configuration
Configuring the host system
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-As you read through the Administration Guide you will notice configuration hints
-inline with documentation on the subsystem you are configuring. Presented in
-this "Getting Started with Nova" document, we only provide what you need to
-get started as quickly as possible. For a more detailed description of system
-configuration, start reading through :doc:`multi.node.install`.
+Nova can be configured in many different ways. In this "Getting Started with Nova" document, we only provide what you need to get started as quickly as possible. For a more detailed description of system
+configuration, start reading through `Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_.
+
+`Detailed instructions for creating a volume group are available <http://docs.openstack.org/openstack-compute/admin/content/ch05s07.html>`_, or use these quick instructions.
* Create a volume group (you can use an actual disk for the volume group as
well)::
@@ -136,6 +135,8 @@ flagfile, so typically a file like ``nova-manage.conf`` would have as its first
line ``--flagfile=/etc/nova/nova.conf`` to load the common flags before
specifying overrides or additional options.
+To get a current comprehensive list of flag file options, run bin/nova-<servicename> --help, or refer to a static list at `Reference for Flags in nova.conf <http://docs.openstack.org/openstack-compute/admin/content/ch05s08.html>`_.
+
A sample configuration to test the system follows::
--verbose
@@ -143,13 +144,13 @@ A sample configuration to test the system follows::
--auth_driver=nova.auth.dbdriver.DbDriver
Running
----------
+-------
There are many parts to the nova system, each with a specific function. They
are built to be highly-available, so there are many configurations they can be
run in (ie: on many machines, many listeners per machine, etc). This part
of the guide only gets you started quickly, to learn about HA options, see
-:doc:`multi.node.install`.
+`Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_.
Launch supporting services
diff --git a/doc/source/adminguide/index.rst b/doc/source/runnova/index.rst
index 3bd72cfdc..283d268ce 100644
--- a/doc/source/adminguide/index.rst
+++ b/doc/source/runnova/index.rst
@@ -15,17 +15,17 @@
License for the specific language governing permissions and limitations
under the License.
-Administration Guide
-====================
+Running Nova
+============
-This guide describes the basics of running and managing Nova.
+This guide describes the basics of running and managing Nova. For additional administration documentation, refer to `docs.openstack.org <http://docs.openstack.org>`_.
Running the Cloud
-----------------
-The fastest way to get a test cloud running is by following the directions in the :doc:`../quickstart`.
+The fastest way to get a test cloud running is by following the directions in the :doc:`../quickstart`. It relies on a nova.sh script to run on a single machine.
-Nova's cloud works via the interaction of a series of daemon processes that reside persistently on the host machine(s). Fortunately, the :doc:`../quickstart` process launches sample versions of all these daemons for you. Once you are familiar with basic Nova usage, you can learn more about daemons by reading :doc:`../service.architecture` and :doc:`binaries`.
+Nova's cloud works via the interaction of a series of daemon processes that reside persistently on the host machine(s). Fortunately, the :doc:`../quickstart` process launches sample versions of all these daemons for you. Once you are familiar with basic Nova usage, you can learn more about daemons by reading :doc:`../service.architecture` and :doc:`binaries`.
Administration Utilities
------------------------
@@ -60,12 +60,12 @@ For background on the core objects referenced in this section, see :doc:`../obje
Deployment
----------
-For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq).
+For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq). For instructions on multi-server installations, refer to `Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_.
+
.. toctree::
:maxdepth: 1
- multi.node.install
dbsync
@@ -75,7 +75,6 @@ Networking
.. toctree::
:maxdepth: 1
- multi.node.install
network.vlan.rst
network.flat.rst
diff --git a/doc/source/adminguide/managing.images.rst b/doc/source/runnova/managing.images.rst
index c5d93a6e8..c5d93a6e8 100644
--- a/doc/source/adminguide/managing.images.rst
+++ b/doc/source/runnova/managing.images.rst
diff --git a/doc/source/runnova/managing.instance.types.rst b/doc/source/runnova/managing.instance.types.rst
new file mode 100644
index 000000000..746077716
--- /dev/null
+++ b/doc/source/runnova/managing.instance.types.rst
@@ -0,0 +1,84 @@
+..
+ Copyright 2011 Ken Pepple
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Managing Instance Types and Flavors
+===================================
+
+What are Instance Types or Flavors ?
+------------------------------------
+
+Instance types describe the compute, memory and storage capacity of nova computing instances. In layman terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching. In the EC2 API, these are called by names such as "m1.large" or "m1.tiny", while the OpenStack API terms these "flavors" with names like "512 MB Server".
+
+In Nova, "flavor" and "instance type" are equivalent terms. When you create an EC2 instance type, you are also creating a OpenStack API flavor. To reduce repetition, for the rest of this document I will refer to these as instance types.
+
+Instance types can be in either the active or inactive state:
+ * Active instance types are available to be used for launching instances
+ * Inactive instance types are not available for launching instances
+
+In the current (Cactus) version of nova, instance types can only be created by the nova administrator through the nova-manage command. Future versions of nova (in concert with the OpenStack API or EC2 API) may expose this functionality directly to users.
+
+Basic Management
+----------------
+
+Instance types / flavors are managed through the nova-manage binary with
+the "instance_type" command and an appropriate subcommand. Note that you can also use
+the "flavor" command as a synonym for "instance_type".
+
+To see all currently active instance types, use the list subcommand::
+
+ # nova-manage instance_type list
+ m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+
+By default, the list subcommand only shows active instance types. To see all instance types (inactive and active), use the list subcommand with the "--all" flag::
+
+ # nova-manage instance_type list --all
+ m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.deleted: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB, inactive
+
+To create an instance type, use the "create" subcommand with the following positional arguments:
+ * memory (expressed in megabytes)
+ * vcpu(s) (integer)
+ * local storage (expressed in gigabytes)
+ * flavorid (unique integer)
+ * swap space (expressed in megabytes, defaults to zero, optional)
+ * RXTX quotas (expressed in gigabytes, defaults to zero, optional)
+ * RXTX cap (expressed in gigabytes, defaults to zero, optional)
+
+The following example creates an instance type named "m1.xxlarge"::
+
+ # nova-manage instance_type create m1.xxlarge 32768 16 320 0 0 0
+ m1.xxlarge created
+
+To delete an instance type, use the "delete" subcommand and specify the name::
+
+ # nova-manage instance_type delete m1.xxlarge
+ m1.xxlarge deleted
+
+Please note that the "delete" command only marks the instance type as
+inactive in the database; it does not actually remove the instance type. This is done
+to preserve the instance type definition for long running instances (which may not
+terminate for months or years). If you are sure that you want to delete this instance
+type from the database, pass the "--purge" flag after the name::
+
+ # nova-manage instance_type delete m1.xxlarge --purge
+ m1.xxlarge purged
diff --git a/doc/source/adminguide/managing.instances.rst b/doc/source/runnova/managing.instances.rst
index e62352017..e62352017 100644
--- a/doc/source/adminguide/managing.instances.rst
+++ b/doc/source/runnova/managing.instances.rst
diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/runnova/managing.networks.rst
index 9eea46d70..9eea46d70 100644
--- a/doc/source/adminguide/managing.networks.rst
+++ b/doc/source/runnova/managing.networks.rst
diff --git a/doc/source/adminguide/managing.projects.rst b/doc/source/runnova/managing.projects.rst
index 5dd7f2de9..5dd7f2de9 100644
--- a/doc/source/adminguide/managing.projects.rst
+++ b/doc/source/runnova/managing.projects.rst
diff --git a/doc/source/adminguide/managing.users.rst b/doc/source/runnova/managing.users.rst
index 392142e86..392142e86 100644
--- a/doc/source/adminguide/managing.users.rst
+++ b/doc/source/runnova/managing.users.rst
diff --git a/doc/source/adminguide/managingsecurity.rst b/doc/source/runnova/managingsecurity.rst
index 7893925e7..7893925e7 100644
--- a/doc/source/adminguide/managingsecurity.rst
+++ b/doc/source/runnova/managingsecurity.rst
diff --git a/doc/source/adminguide/monitoring.rst b/doc/source/runnova/monitoring.rst
index 2c93c71b5..2c93c71b5 100644
--- a/doc/source/adminguide/monitoring.rst
+++ b/doc/source/runnova/monitoring.rst
diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/runnova/network.flat.rst
index 3d8680c6f..3d8680c6f 100644
--- a/doc/source/adminguide/network.flat.rst
+++ b/doc/source/runnova/network.flat.rst
diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/runnova/network.vlan.rst
index c06ce8e8b..c06ce8e8b 100644
--- a/doc/source/adminguide/network.vlan.rst
+++ b/doc/source/runnova/network.vlan.rst
diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/runnova/nova.manage.rst
index 0e9a29b6b..0636e5752 100644
--- a/doc/source/adminguide/nova.manage.rst
+++ b/doc/source/runnova/nova.manage.rst
@@ -182,6 +182,29 @@ Nova Floating IPs
Displays a list of all floating IP addresses.
+Nova Images
+~~~~~~~~~~~
+
+``nova-manage image image_register <path> <owner>``
+
+ Registers an image with the image service.
+
+``nova-manage image kernel_register <path> <owner>``
+
+ Registers a kernel with the image service.
+
+``nova-manage image ramdisk_register <path> <owner>``
+
+ Registers a ramdisk with the image service.
+
+``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``
+
+ Registers an image, kernel and ramdisk with the image service.
+
+``nova-manage image convert <directory>``
+
+ Converts all images in directory from the old (Bexar) format to the new format.
+
Concept: Flags
--------------
diff --git a/doc/source/runnova/vncconsole.rst b/doc/source/runnova/vncconsole.rst
new file mode 100644
index 000000000..c1fe9be39
--- /dev/null
+++ b/doc/source/runnova/vncconsole.rst
@@ -0,0 +1,76 @@
+..
+ Copyright 2010-2011 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Getting Started with the VNC Proxy
+==================================
+
+The VNC Proxy is an OpenStack component that allows users of Nova to access
+their instances through a websocket enabled browser (like Google Chrome).
+
+A VNC connection works as follows:
+
+* The user connects over an API and gets a URL like http://ip:port/?token=xyz
+* The user pastes the URL in a browser
+* The browser connects to the VNC Proxy through a websocket-enabled client like noVNC
+* The VNC Proxy authorizes the user's token and maps the token to the host and port of an
+ instance's VNC server
+* The VNC Proxy initiates a connection to the VNC server and continues proxying until
+ the session ends
+
+
+Configuring the VNC Proxy
+-------------------------
+nova-vncproxy requires a websocket-enabled HTML client to work properly. At
+this time, the only tested client is a slightly modified fork of noVNC, which
+you can find at http://github.com/openstack/noVNC.git
+
+.. todo:: add instruction for installing from package
+
+noVNC must be in the location specified by --vncproxy_wwwroot, which defaults
+to /var/lib/nova/noVNC. nova-vncproxy will fail to launch until this code
+is properly installed.
+
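+For example, one way to put it in place is to clone the fork into the default location (the path shown is simply the default)::
+
+  git clone http://github.com/openstack/noVNC.git /var/lib/nova/noVNC
+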
+By default, nova-vncproxy binds to 0.0.0.0:6080. This can be configured with the following flags (see the example below):
+
+* --vncproxy_port=[port]
+* --vncproxy_host=[host]
+
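+For example, in a flag file (the values shown are just the defaults made explicit)::
+
+  --vncproxy_host=0.0.0.0
+  --vncproxy_port=6080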
+
+Enabling VNC Consoles in Nova
+-----------------------------
+At the moment, VNC consoles are supported only when using libvirt. To enable
+VNC consoles, configure the following flags (an example flag file fragment follows the list):
+
+* --vnc_console_proxy_url=http://[proxy_host]:[proxy_port] - proxy_port
+ defaults to 6080. This url must point to nova-vncproxy
+* --vnc_enabled=[True|False] - defaults to True. If this flag is set to False, your
+ instances will launch without VNC support.
+
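+For example, a flag file fragment (the proxy host address is hypothetical)::
+
+  --vnc_enabled=True
+  --vnc_console_proxy_url=http://192.168.1.10:6080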
+
+Getting an instance's VNC Console
+---------------------------------
+You can access an instance's VNC console URL in the following ways:
+
+* Using the direct API,
+ e.g.: 'stack --user=admin --project=admin compute get_vnc_console instance_id=1'
+* Support for the Dashboard and the OpenStack API is forthcoming
+
+
+Accessing VNC Consoles without a web browser
+--------------------------------------------
+At the moment, VNC Consoles are only supported through the web browser, but
+more general VNC support is in the works.
diff --git a/doc/source/vmwareapi_readme.rst b/doc/source/vmwareapi_readme.rst
new file mode 100644
index 000000000..85f2694c0
--- /dev/null
+++ b/doc/source/vmwareapi_readme.rst
@@ -0,0 +1,218 @@
+..
+ Copyright (c) 2010 Citrix Systems, Inc.
+ Copyright 2010 OpenStack LLC.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+VMware ESX/ESXi Server Support for OpenStack Compute
+====================================================
+
+Introduction
+------------
+A module named 'vmwareapi' is added to 'nova.virt' to add support for the VMware ESX/ESXi hypervisor to OpenStack Compute (Nova). Nova may now use VMware vSphere as a compute provider.
+
+The basic requirement is to support VMware vSphere 4.1 as a compute provider within Nova. The deployment architecture supports both ESX and ESXi. VM storage is restricted to VMFS volumes on local drives. vCenter is not required by the current design, and is not currently supported. Instead, Nova Compute talks directly to ESX/ESXi.
+
+The 'vmwareapi' module is integrated with Glance, so that VM images can be streamed from the Glance server (which handles image storage and retrieval) when booting on ESX/ESXi.
+
+The module currently supports both Nova's flat networking model (FlatManager) and its VLAN networking model.
+
+.. image:: images/vmwareapi_blockdiagram.jpg
+
+
+System Requirements
+-------------------
+The following software components are required for building a cloud using OpenStack on top of ESX/ESXi server(s):
+
+* OpenStack
+* Glance Image service
+* VMware ESX v4.1 or VMware ESXi (licensed) v4.1
+
+VMware ESX Requirements
+-----------------------
+* ESX credentials with administration/root privileges
+* Single local hard disk at the ESX host
+* An ESX Virtual Machine Port Group (For Flat Networking)
+* An ESX physical network adapter (For VLAN networking)
+* "vSphere Web Access" must be enabled in the vSphere Client UI under Configuration -> Security Profile -> Firewall
+
+Python dependencies
+-------------------
+* suds-0.4
+
+* Installation procedure on Ubuntu/Debian
+
+::
+
+ easy_install suds==0.4
+
+
+Configuration flags required for nova-compute
+---------------------------------------------
+::
+
+ --connection_type=vmwareapi
+ --vmwareapi_host_ip=<VMware ESX Host IP>
+ --vmwareapi_host_username=<VMware ESX Username>
+ --vmwareapi_host_password=<VMware ESX Password>
+ --network_driver=nova.network.vmwareapi_net [Optional, only for VLAN Networking]
+ --vlan_interface=<Physical ethernet adapter name in VMware ESX host for vlan networking E.g vmnic0> [Optional, only for VLAN Networking]
+
+
+Configuration flags required for nova-network
+---------------------------------------------
+::
+
+ --network_manager=nova.network.manager.FlatManager [or nova.network.manager.VlanManager]
+ --flat_network_bridge=<ESX Virtual Machine Port Group> [Optional, only for Flat Networking]
+
+
+Configuration flags required for nova-console
+---------------------------------------------
+::
+
+ --console_manager=nova.console.vmrc_manager.ConsoleVMRCManager
+ --console_driver=nova.console.vmrc.VMRCSessionConsole [Optional, only for OTP (One time Passwords) as against host credentials]
+
+
+Other flags
+-----------
+::
+
+ --image_service=nova.image.glance.GlanceImageService
+ --glance_host=<Glance Host>
+ --vmwareapi_wsdl_loc=<http://<WEB SERVER>/vimService.wsdl>
+
+Note: due to a faulty WSDL shipped with ESX vSphere 4.1, a working WSDL must be hosted on a web server of your choice. Follow the steps below to download the SDK:
+
+* Go to http://www.vmware.com/support/developer/vc-sdk/
+* Go to section VMware vSphere Web Services SDK 4.0
+* Click "Downloads"
+* Enter VMware credentials when prompted for download
+* Unzip the downloaded file vi-sdk-4.0.0-xxx.zip
+* Go to SDK->WSDL->vim25 and host the files "vimService.wsdl" and "vim.wsdl" on a web server
+* Set the flag "--vmwareapi_wsdl_loc" to the URL "http://<WEB SERVER>/vimService.wsdl"
+
+
+VLAN Network Manager
+--------------------
+VLAN network support is added through a custom network driver on the nova-compute node, "nova.network.vmwareapi_net". It uses a physical ethernet adapter on the VMware ESX/ESXi host for VLAN networking; the adapter's name is specified via the vlan_interface flag in the nova-compute configuration.
+
+Using the physical adapter name the associated Virtual Switch will be determined. In VMware ESX there can be only one Virtual Switch associated with a Physical adapter.
+
+When VM Spawn request is issued with a VLAN ID the work flow looks like,
+
+1. Check that a Physical adapter with the given name exists. If no, throw an error.If yes, goto next step.
+
+2. Check if a Virtual Switch is associated with the Physical ethernet adapter with vlan interface name. If no, throw an error. If yes, goto next step.
+
+3. Check if a port group with the network bridge name exists. If no, create a port group in the Virtual switch with the give name and VLAN id and goto step 6. If yes, goto next step.
+
+4. Check if the port group is associated with the Virtual Switch. If no, throw an error. If yes, goto next step.
+
+5. Check if the port group is associated with the given VLAN Id. If no, throw an error. If yes, goto next step.
+
+6. Spawn the VM using this Port Group as the Network Name for the VM.
+
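+The checks above can be summarised with the following sketch. It is illustrative only, assuming a dict-based inventory in place of real Vim API calls; it is not the actual vmwareapi driver code.
+
+::
+
+    # Hypothetical sketch of steps 1-6; the real driver queries ESX/ESXi
+    # through the vSphere (Vim) API rather than an in-memory dict.
+    def ensure_vlan_port_group(inventory, vlan_interface, bridge, vlan_id):
+        """Return the port group name to use as the VM's network name."""
+        if vlan_interface not in inventory['adapters']:                 # step 1
+            raise ValueError("physical adapter %s not found" % vlan_interface)
+
+        vswitch = inventory['vswitch_by_adapter'].get(vlan_interface)   # step 2
+        if vswitch is None:
+            raise ValueError("no vSwitch associated with %s" % vlan_interface)
+
+        port_group = inventory['port_groups'].get(bridge)               # step 3
+        if port_group is None:
+            inventory['port_groups'][bridge] = {'vswitch': vswitch,
+                                                'vlan_id': vlan_id}
+            return bridge                                                # go to step 6
+
+        if port_group['vswitch'] != vswitch:                             # step 4
+            raise ValueError("port group %s is on another vSwitch" % bridge)
+        if port_group['vlan_id'] != vlan_id:                             # step 5
+            raise ValueError("port group %s uses a different VLAN id" % bridge)
+        return bridge                                                    # step 6: spawn with this name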
+
+Guest console Support
+---------------------
+| The VMware VMRC console is a built-in console method that provides remote graphical control of the VM.
+|
+| VMRC Console types supported:
+| # Host-based credentials
+| Not secure (sends ESX admin credentials in clear text)
+|
+| # OTP (one-time passwords)
+| Secure, but creates multiple session entries in the DB for each OpenStack console create request.
+| A console session, once created, can be used only once.
+|
+| Install the browser-based VMware ESX plugin/ActiveX control on the client machine to connect:
+|
+| Windows:-
+| Internet Explorer:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-win32-x86.exe
+|
+| Mozilla Firefox:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-win32-x86.xpi
+|
+| Linux:-
+| Mozilla Firefox
+| 32-Bit Linux:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-linux-x86.xpi
+|
+| 64-Bit Linux:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-linux-x64.xpi
+|
+| OpenStack Console Details:
+| console_type = vmrc+credentials | vmrc+session
+| host = <VMware ESX Host>
+| port = <VMware ESX Port>
+| password = {'vm_id': <VMware VM ID>, 'username': <VMware ESX Username>, 'password': <VMware ESX Password>} //base64 + json encoded (see the encoding sketch after this block)
+|
+| Instantiate the plugin/activex object
+| # In Internet Explorer
+| <object id='vmrc' classid='CLSID:B94C2238-346E-4C5E-9B36-8CC627F35574'>
+| </object>
+|
+| # Mozilla Firefox and other browsers
+| <object id='vmrc' type='application/x-vmware-vmrc;version=2.5.0.0'>
+| </object>
+|
+| Open vmrc connection
+| # Host based credentials [type=vmrc+credentials]
+| <script type="text/javascript">
+| var MODE_WINDOW = 2;
+| var vmrc = document.getElementById('vmrc');
+| vmrc.connect(<VMware ESX Host> + ':' + <VMware ESX Port>, <VMware ESX Username>, <VMware ESX Password>, '', <VMware VM ID>, MODE_WINDOW);
+| </script>
+|
+| # OTP (One time passwords) [type=vmrc+session]
+| <script type="text/javascript">
+| var MODE_WINDOW = 2;
+| var vmrc = document.getElementById('vmrc');
+| vmrc.connectWithSession(<VMware ESX Host> + ':' + <VMware ESX Port>, <VMware VM ID>, <VMware ESX Password>, MODE_WINDOW);
+| </script>
+
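+The password field above is described as base64 + JSON encoded. A minimal sketch of producing such a blob is shown below; the field names follow the description above, and this is not Nova API code.
+
+::
+
+    import base64
+    import json
+
+    def encode_vmrc_password(vm_id, username, password):
+        # JSON-encode the credential fields, then base64-encode the result.
+        blob = json.dumps({'vm_id': vm_id,
+                           'username': username,
+                           'password': password})
+        return base64.b64encode(blob.encode('utf-8')).decode('ascii')
+
+    # Illustrative values only.
+    print(encode_vmrc_password('vm-1234', 'root', 'secret'))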
+
+Assumptions
+-----------
+1. The VMware images uploaded to the image repositories have VMware Tools installed.
+
+
+FAQ
+---
+
+1. What type of disk images are supported?
+
+* Only VMware VMDKs are currently supported, and of those only thick-provisioned disks; thin-provisioned disks are not supported.
+
+
+2. How is IP address information injected into the guest?
+
+* IP address information is injected through the 'machine.id' vmx parameter (the equivalent of XenStore in XenServer). This information can be retrieved inside the guest using VMware Tools.
+
+
+3. What is the guest tool?
+
+* The guest tool is a small Python script that should be run either as a service or added to system startup. The script configures networking on the guest. It is available at tools/esx/guest_tool.py.
+
+
+4. What type of consoles are supported?
+
+* VMware VMRC-based consoles are supported. There are two credential options: OTP (secure, but creates multiple session entries in the DB for each OpenStack console create request) and host-based credentials (not secure, since ESX credentials are transmitted in clear text).
+
+5. What does 'Vim' refer to as far as vmwareapi module is concerned?
+
+* Vim refers to the VMware Virtual Infrastructure Methodology. It is not to be confused with the "vim" editor.
+
diff --git a/etc/nova-api.conf b/etc/api-paste.ini
index 9f7e93d4c..abe8c20c4 100644
--- a/etc/nova-api.conf
+++ b/etc/api-paste.ini
@@ -67,10 +67,14 @@ paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.f
[composite:osapi]
use = egg:Paste#urlmap
/: osversions
-/v1.0: openstackapi
+/v1.0: openstackapi10
+/v1.1: openstackapi11
-[pipeline:openstackapi]
-pipeline = faultwrap auth ratelimit osapiapp
+[pipeline:openstackapi10]
+pipeline = faultwrap auth ratelimit osapiapp10
+
+[pipeline:openstackapi11]
+pipeline = faultwrap auth ratelimit extensions osapiapp11
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
@@ -79,13 +83,19 @@ paste.filter_factory = nova.api.openstack:FaultWrapper.factory
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
[filter:ratelimit]
-paste.filter_factory = nova.api.openstack.ratelimiting:RateLimitingMiddleware.factory
+paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
+
+[filter:extensions]
+paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
+
+[app:osapiapp10]
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
-[app:osapiapp]
-paste.app_factory = nova.api.openstack:APIRouter.factory
+[app:osapiapp11]
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
[pipeline:osversions]
pipeline = faultwrap osversionapp
[app:osversionapp]
-paste.app_factory = nova.api.openstack:Versions.factory
+paste.app_factory = nova.api.openstack.versions:Versions.factory
diff --git a/CA/.gitignore b/nova/CA/.gitignore
index fae0922bf..fae0922bf 100644
--- a/CA/.gitignore
+++ b/nova/CA/.gitignore
diff --git a/CA/geninter.sh b/nova/CA/geninter.sh
index 1fbcc9e73..4b7f5a55c 100755
--- a/CA/geninter.sh
+++ b/nova/CA/geninter.sh
@@ -23,7 +23,7 @@ mkdir -p projects/$NAME
cd projects/$NAME
cp ../../openssl.cnf.tmpl openssl.cnf
sed -i -e s/%USERNAME%/$NAME/g openssl.cnf
-mkdir certs crl newcerts private
+mkdir -p certs crl newcerts private
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
echo "10" > serial
touch index.txt
diff --git a/CA/genrootca.sh b/nova/CA/genrootca.sh
index 8f2c3ee3f..091cf17fc 100755
--- a/CA/genrootca.sh
+++ b/nova/CA/genrootca.sh
@@ -20,8 +20,9 @@ if [ -f "cacert.pem" ];
then
echo "Not installing, it's already done."
else
- cp openssl.cnf.tmpl openssl.cnf
+ cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf
sed -i -e s/%USERNAME%/ROOT/g openssl.cnf
+ mkdir -p certs crl newcerts private
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
touch index.txt
echo "10" > serial
diff --git a/CA/genvpn.sh b/nova/CA/genvpn.sh
index 7e7db185d..7e7db185d 100755
--- a/CA/genvpn.sh
+++ b/nova/CA/genvpn.sh
diff --git a/CA/newcerts/.placeholder b/nova/CA/newcerts/.placeholder
index e69de29bb..e69de29bb 100644
--- a/CA/newcerts/.placeholder
+++ b/nova/CA/newcerts/.placeholder
diff --git a/CA/openssl.cnf.tmpl b/nova/CA/openssl.cnf.tmpl
index dd81f1c2b..dd81f1c2b 100644
--- a/CA/openssl.cnf.tmpl
+++ b/nova/CA/openssl.cnf.tmpl
diff --git a/CA/private/.placeholder b/nova/CA/private/.placeholder
index e69de29bb..e69de29bb 100644
--- a/CA/private/.placeholder
+++ b/nova/CA/private/.placeholder
diff --git a/CA/projects/.gitignore b/nova/CA/projects/.gitignore
index 72e8ffc0d..72e8ffc0d 100644
--- a/CA/projects/.gitignore
+++ b/nova/CA/projects/.gitignore
diff --git a/CA/projects/.placeholder b/nova/CA/projects/.placeholder
index e69de29bb..e69de29bb 100644
--- a/CA/projects/.placeholder
+++ b/nova/CA/projects/.placeholder
diff --git a/CA/reqs/.gitignore b/nova/CA/reqs/.gitignore
index 72e8ffc0d..72e8ffc0d 100644
--- a/CA/reqs/.gitignore
+++ b/nova/CA/reqs/.gitignore
diff --git a/CA/reqs/.placeholder b/nova/CA/reqs/.placeholder
index e69de29bb..e69de29bb 100644
--- a/CA/reqs/.placeholder
+++ b/nova/CA/reqs/.placeholder
diff --git a/nova/adminclient.py b/nova/adminclient.py
deleted file mode 100644
index fc3c5c5fe..000000000
--- a/nova/adminclient.py
+++ /dev/null
@@ -1,476 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Nova User API client library.
-"""
-
-import base64
-import boto
-import boto.exception
-import httplib
-import re
-import string
-
-from boto.ec2.regioninfo import RegionInfo
-
-
-DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
-DEFAULT_REGION = 'nova'
-
-
-class UserInfo(object):
- """
- Information about a Nova user, as parsed through SAX.
-
- **Fields Include**
-
- * username
- * accesskey
- * secretkey
- * file (optional) containing zip of X509 cert & rc file
-
- """
-
- def __init__(self, connection=None, username=None, endpoint=None):
- self.connection = connection
- self.username = username
- self.endpoint = endpoint
-
- def __repr__(self):
- return 'UserInfo:%s' % self.username
-
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- if name == 'username':
- self.username = str(value)
- elif name == 'file':
- self.file = base64.b64decode(str(value))
- elif name == 'accesskey':
- self.accesskey = str(value)
- elif name == 'secretkey':
- self.secretkey = str(value)
-
-
-class UserRole(object):
- """
- Information about a Nova user's role, as parsed through SAX.
-
- **Fields include**
-
- * role
-
- """
-
- def __init__(self, connection=None):
- self.connection = connection
- self.role = None
-
- def __repr__(self):
- return 'UserRole:%s' % self.role
-
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- if name == 'role':
- self.role = value
- else:
- setattr(self, name, str(value))
-
-
-class ProjectInfo(object):
- """
- Information about a Nova project, as parsed through SAX.
-
- **Fields include**
-
- * projectname
- * description
- * projectManagerId
- * memberIds
-
- """
-
- def __init__(self, connection=None):
- self.connection = connection
- self.projectname = None
- self.description = None
- self.projectManagerId = None
- self.memberIds = []
-
- def __repr__(self):
- return 'ProjectInfo:%s' % self.projectname
-
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- if name == 'projectname':
- self.projectname = value
- elif name == 'description':
- self.description = value
- elif name == 'projectManagerId':
- self.projectManagerId = value
- elif name == 'memberId':
- self.memberIds.append(value)
- else:
- setattr(self, name, str(value))
-
-
-class ProjectMember(object):
- """
- Information about a Nova project member, as parsed through SAX.
-
- **Fields include**
-
- * memberId
-
- """
-
- def __init__(self, connection=None):
- self.connection = connection
- self.memberId = None
-
- def __repr__(self):
- return 'ProjectMember:%s' % self.memberId
-
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- if name == 'member':
- self.memberId = value
- else:
- setattr(self, name, str(value))
-
-
-class HostInfo(object):
- """
- Information about a Nova Host, as parsed through SAX.
-
- **Fields Include**
-
- * Hostname
- * Compute service status
- * Volume service status
- * Instance count
- * Volume count
- """
-
- def __init__(self, connection=None):
- self.connection = connection
- self.hostname = None
- self.compute = None
- self.volume = None
- self.instance_count = 0
- self.volume_count = 0
-
- def __repr__(self):
- return 'Host:%s' % self.hostname
-
- # this is needed by the sax parser, so ignore the ugly name
- def startElement(self, name, attrs, connection):
- return None
-
- # this is needed by the sax parser, so ignore the ugly name
- def endElement(self, name, value, connection):
- fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
- setattr(self, fixed_name, value)
-
-
-class Vpn(object):
- """
- Information about a Vpn, as parsed through SAX
-
- **Fields Include**
-
- * instance_id
- * project_id
- * public_ip
- * public_port
- * created_at
- * internal_ip
- * state
- """
-
- def __init__(self, connection=None):
- self.connection = connection
- self.instance_id = None
- self.project_id = None
-
- def __repr__(self):
- return 'Vpn:%s:%s' % (self.project_id, self.instance_id)
-
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
- setattr(self, fixed_name, value)
-
-
-class InstanceType(object):
- """
- Information about a Nova instance type, as parsed through SAX.
-
- **Fields include**
-
- * name
- * vcpus
- * disk_gb
- * memory_mb
- * flavor_id
-
- """
-
- def __init__(self, connection=None):
- self.connection = connection
- self.name = None
- self.vcpus = None
- self.disk_gb = None
- self.memory_mb = None
- self.flavor_id = None
-
- def __repr__(self):
- return 'InstanceType:%s' % self.name
-
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- if name == "memoryMb":
- self.memory_mb = str(value)
- elif name == "flavorId":
- self.flavor_id = str(value)
- elif name == "diskGb":
- self.disk_gb = str(value)
- else:
- setattr(self, name, str(value))
-
-
-class NovaAdminClient(object):
-
- def __init__(
- self,
- clc_url=DEFAULT_CLC_URL,
- region=DEFAULT_REGION,
- access_key=None,
- secret_key=None,
- **kwargs):
- parts = self.split_clc_url(clc_url)
-
- self.clc_url = clc_url
- self.region = region
- self.access = access_key
- self.secret = secret_key
- self.apiconn = boto.connect_ec2(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=parts['is_secure'],
- region=RegionInfo(None,
- region,
- parts['ip']),
- port=parts['port'],
- path='/services/Admin',
- **kwargs)
- self.apiconn.APIVersion = 'nova'
-
- def connection_for(self, username, project, clc_url=None, region=None,
- **kwargs):
- """Returns a boto ec2 connection for the given username."""
- if not clc_url:
- clc_url = self.clc_url
- if not region:
- region = self.region
- parts = self.split_clc_url(clc_url)
- user = self.get_user(username)
- access_key = '%s:%s' % (user.accesskey, project)
- return boto.connect_ec2(aws_access_key_id=access_key,
- aws_secret_access_key=user.secretkey,
- is_secure=parts['is_secure'],
- region=RegionInfo(None,
- self.region,
- parts['ip']),
- port=parts['port'],
- path='/services/Cloud',
- **kwargs)
-
- def split_clc_url(self, clc_url):
- """Splits a cloud controller endpoint url."""
- parts = httplib.urlsplit(clc_url)
- is_secure = parts.scheme == 'https'
- ip, port = parts.netloc.split(':')
- return {'ip': ip, 'port': int(port), 'is_secure': is_secure}
-
- def get_users(self):
- """Grabs the list of all users."""
- return self.apiconn.get_list('DescribeUsers', {}, [('item', UserInfo)])
-
- def get_user(self, name):
- """Grab a single user by name."""
- try:
- return self.apiconn.get_object('DescribeUser',
- {'Name': name},
- UserInfo)
- except boto.exception.BotoServerError, e:
- if e.status == 400 and e.error_code == 'NotFound':
- return None
- raise
-
- def has_user(self, username):
- """Determine if user exists."""
- return self.get_user(username) != None
-
- def create_user(self, username):
- """Creates a new user, returning the userinfo object with
- access/secret."""
- return self.apiconn.get_object('RegisterUser', {'Name': username},
- UserInfo)
-
- def delete_user(self, username):
- """Deletes a user."""
- return self.apiconn.get_object('DeregisterUser', {'Name': username},
- UserInfo)
-
- def get_roles(self, project_roles=True):
- """Returns a list of available roles."""
- return self.apiconn.get_list('DescribeRoles',
- {'ProjectRoles': project_roles},
- [('item', UserRole)])
-
- def get_user_roles(self, user, project=None):
- """Returns a list of roles for the given user.
-
- Omitting project will return any global roles that the user has.
- Specifying project will return only project specific roles.
-
- """
- params = {'User': user}
- if project:
- params['Project'] = project
- return self.apiconn.get_list('DescribeUserRoles',
- params,
- [('item', UserRole)])
-
- def add_user_role(self, user, role, project=None):
- """Add a role to a user either globally or for a specific project."""
- return self.modify_user_role(user, role, project=project,
- operation='add')
-
- def remove_user_role(self, user, role, project=None):
- """Remove a role from a user either globally or for a specific
- project."""
- return self.modify_user_role(user, role, project=project,
- operation='remove')
-
- def modify_user_role(self, user, role, project=None, operation='add',
- **kwargs):
- """Add or remove a role for a user and project."""
- params = {'User': user,
- 'Role': role,
- 'Project': project,
- 'Operation': operation}
- return self.apiconn.get_status('ModifyUserRole', params)
-
- def get_projects(self, user=None):
- """Returns a list of all projects."""
- if user:
- params = {'User': user}
- else:
- params = {}
- return self.apiconn.get_list('DescribeProjects',
- params,
- [('item', ProjectInfo)])
-
- def get_project(self, name):
- """Returns a single project with the specified name."""
- project = self.apiconn.get_object('DescribeProject',
- {'Name': name},
- ProjectInfo)
-
- if project.projectname != None:
- return project
-
- def create_project(self, projectname, manager_user, description=None,
- member_users=None):
- """Creates a new project."""
- params = {'Name': projectname,
- 'ManagerUser': manager_user,
- 'Description': description,
- 'MemberUsers': member_users}
- return self.apiconn.get_object('RegisterProject', params, ProjectInfo)
-
- def modify_project(self, projectname, manager_user=None, description=None):
- """Modifies an existing project."""
- params = {'Name': projectname,
- 'ManagerUser': manager_user,
- 'Description': description}
- return self.apiconn.get_status('ModifyProject', params)
-
- def delete_project(self, projectname):
- """Permanently deletes the specified project."""
- return self.apiconn.get_object('DeregisterProject',
- {'Name': projectname},
- ProjectInfo)
-
- def get_project_members(self, name):
- """Returns a list of members of a project."""
- return self.apiconn.get_list('DescribeProjectMembers',
- {'Name': name},
- [('item', ProjectMember)])
-
- def add_project_member(self, user, project):
- """Adds a user to a project."""
- return self.modify_project_member(user, project, operation='add')
-
- def remove_project_member(self, user, project):
- """Removes a user from a project."""
- return self.modify_project_member(user, project, operation='remove')
-
- def modify_project_member(self, user, project, operation='add'):
- """Adds or removes a user from a project."""
- params = {'User': user,
- 'Project': project,
- 'Operation': operation}
- return self.apiconn.get_status('ModifyProjectMember', params)
-
- def get_zip(self, user, project):
- """Returns the content of a zip file containing novarc and access
- credentials."""
- params = {'Name': user, 'Project': project}
- zip = self.apiconn.get_object('GenerateX509ForUser', params, UserInfo)
- return zip.file
-
- def start_vpn(self, project):
- """
- Starts the vpn for a user
- """
- return self.apiconn.get_object('StartVpn', {'Project': project}, Vpn)
-
- def get_vpns(self):
- """Return a list of vpn with project name"""
- return self.apiconn.get_list('DescribeVpns', {}, [('item', Vpn)])
-
- def get_hosts(self):
- return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
-
- def get_instance_types(self):
- """Grabs the list of all users."""
- return self.apiconn.get_list('DescribeInstanceTypes', {},
- [('item', InstanceType)])
diff --git a/nova/api/direct.py b/nova/api/direct.py
index 208b6d086..e5f33cee4 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -38,6 +38,7 @@ import routes
import webob
from nova import context
+from nova import exception
from nova import flags
from nova import utils
from nova import wsgi
@@ -187,7 +188,7 @@ class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
@@ -205,10 +206,53 @@ class ServiceWrapper(wsgi.Controller):
# NOTE(vish): make sure we have no unicode keys for py2.6.
params = dict([(str(k), v) for (k, v) in params.iteritems()])
result = method(context, **params)
- if type(result) is dict or type(result) is list:
- return self._serialize(result, req)
- else:
+ if result is None or type(result) is str or type(result) is unicode:
return result
+ try:
+ return self._serialize(result, req.best_match_content_type())
+ except:
+ raise exception.Error("returned non-serializable type: %s"
+ % result)
+
+
+class Limited(object):
+ __notdoc = """Limit the available methods on a given object.
+
+ (Not a docstring so that the docstring can be conditionally overriden.)
+
+ Useful when defining a public API that only exposes a subset of an
+ internal API.
+
+ Expected usage of this class is to define a subclass that lists the allowed
+ methods in the 'allowed' variable.
+
+ Additionally where appropriate methods can be added or overwritten, for
+ example to provide backwards compatibility.
+
+ The wrapping approach has been chosen so that the wrapped API can maintain
+ its own internal consistency, for example if it calls "self.create" it
+ should get its own create method rather than anything we do here.
+
+ """
+
+ _allowed = None
+
+ def __init__(self, proxy):
+ self._proxy = proxy
+ if not self.__doc__:
+ self.__doc__ = proxy.__doc__
+ if not self._allowed:
+ self._allowed = []
+
+ def __getattr__(self, key):
+ """Only return methods that are named in self._allowed."""
+ if key not in self._allowed:
+ raise AttributeError()
+ return getattr(self._proxy, key)
+
+ def __dir__(self):
+ """Only return methods that are named in self._allowed."""
+ return [x for x in dir(self._proxy) if x in self._allowed]
class Proxy(object):
@@ -218,7 +262,7 @@ class Proxy(object):
self.prefix = prefix
def __do_request(self, path, context, **kwargs):
- req = webob.Request.blank(path)
+ req = wsgi.Request.blank(path)
req.method = 'POST'
req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
req.environ['openstack.context'] = context
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 7a9c4f957..a5ca14672 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -31,7 +31,7 @@ from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
-from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
from nova.auth import manager
@@ -51,7 +51,7 @@ flags.DEFINE_integer('lockout_window', 15,
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = utils.utcnow()
rv = req.get_response(self.application)
@@ -59,10 +59,13 @@ class RequestLogging(wsgi.Middleware):
return rv
def log_request_completion(self, response, request, start):
- controller = request.environ.get('ec2.controller', None)
- if controller:
- controller = controller.__class__.__name__
- action = request.environ.get('ec2.action', None)
+ apireq = request.environ.get('ec2.request', None)
+ if apireq:
+ controller = apireq.controller
+ action = apireq.action
+ else:
+ controller = None
+ action = None
ctxt = request.environ.get('ec2.context', None)
delta = utils.utcnow() - start
seconds = delta.seconds
@@ -73,7 +76,7 @@ class RequestLogging(wsgi.Middleware):
microseconds,
request.remote_addr,
request.method,
- request.path_info,
+ "%s%s" % (request.script_name, request.path_info),
controller,
action,
response.status_int,
@@ -110,7 +113,7 @@ class Lockout(wsgi.Middleware):
debug=0)
super(Lockout, self).__init__(application)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
@@ -139,7 +142,7 @@ class Authenticate(wsgi.Middleware):
"""Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
# Read request signature and access id.
try:
@@ -188,7 +191,7 @@ class Requestify(wsgi.Middleware):
super(Requestify, self).__init__(app)
self.controller = utils.import_class(controller)()
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
@@ -196,6 +199,12 @@ class Requestify(wsgi.Middleware):
try:
# Raise KeyError if omitted
action = req.params['Action']
+ # Fix bug lp:720157 for older (version 1) clients
+ version = req.params['SignatureVersion']
+ if int(version) == 1:
+ non_args.remove('SignatureMethod')
+ if 'SignatureMethod' in args:
+ args.pop('SignatureMethod')
for non_arg in non_args:
# Remove, but raise KeyError if omitted
args.pop(non_arg)
@@ -267,7 +276,7 @@ class Authorizer(wsgi.Middleware):
},
}
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
@@ -301,7 +310,7 @@ class Executor(wsgi.Application):
response, or a 400 upon failure.
"""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2.context']
api_request = req.environ['ec2.request']
@@ -311,13 +320,13 @@ class Executor(wsgi.Application):
except exception.InstanceNotFound as ex:
LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
context=context)
- ec2_id = cloud.id_to_ec2_id(ex.instance_id)
+ ec2_id = ec2utils.id_to_ec2_id(ex.instance_id)
message = _('Instance %s not found') % ec2_id
return self._error(req, context, type(ex).__name__, message)
except exception.VolumeNotFound as ex:
LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
context=context)
- ec2_id = cloud.id_to_ec2_id(ex.volume_id, 'vol-%08x')
+ ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x')
message = _('Volume %s not found') % ec2_id
return self._error(req, context, type(ex).__name__, message)
except exception.NotFound as ex:
@@ -363,7 +372,7 @@ class Executor(wsgi.Application):
class Versions(wsgi.Application):
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Respond to a request for all EC2 versions."""
# available api versions
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index e2a05fce1..6a5609d4a 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -28,8 +28,8 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
+from nova.api.ec2 import ec2utils
from nova.auth import manager
-from nova.compute import instance_types
FLAGS = flags.FLAGS
@@ -61,7 +61,7 @@ def project_dict(project):
def host_dict(host, compute_service, instances, volume_service, volumes, now):
"""Convert a host model object to a result dict"""
- rv = {'hostanme': host, 'instance_count': len(instances),
+ rv = {'hostname': host, 'instance_count': len(instances),
'volume_count': len(volumes)}
if compute_service:
latest = compute_service['updated_at'] or compute_service['created_at']
@@ -80,8 +80,8 @@ def host_dict(host, compute_service, instances, volume_service, volumes, now):
return rv
-def instance_dict(name, inst):
- return {'name': name,
+def instance_dict(inst):
+ return {'name': inst['name'],
'memory_mb': inst['memory_mb'],
'vcpus': inst['vcpus'],
'disk_gb': inst['local_gb'],
@@ -93,15 +93,18 @@ def vpn_dict(project, vpn_instance):
'public_ip': project.vpn_ip,
'public_port': project.vpn_port}
if vpn_instance:
- rv['instance_id'] = vpn_instance['ec2_id']
+ rv['instance_id'] = ec2utils.id_to_ec2_id(vpn_instance['id'])
rv['created_at'] = utils.isotime(vpn_instance['created_at'])
address = vpn_instance.get('fixed_ip', None)
if address:
rv['internal_ip'] = address['address']
- if utils.vpn_ping(project.vpn_ip, project.vpn_port):
- rv['state'] = 'running'
+ if project.vpn_ip and project.vpn_port:
+ if utils.vpn_ping(project.vpn_ip, project.vpn_port):
+ rv['state'] = 'running'
+ else:
+ rv['state'] = 'down'
else:
- rv['state'] = 'down'
+ rv['state'] = 'down - invalid project vpn config'
else:
rv['state'] = 'pending'
return rv
@@ -115,9 +118,10 @@ class AdminController(object):
def __str__(self):
return 'AdminController'
- def describe_instance_types(self, _context, **_kwargs):
- return {'instanceTypeSet': [instance_dict(n, v) for n, v in
- instance_types.INSTANCE_TYPES.iteritems()]}
+ def describe_instance_types(self, context, **_kwargs):
+ """Returns all active instance types data (vcpus, memory, etc.)"""
+ return {'instanceTypeSet': [instance_dict(v) for v in
+ db.instance_type_get_all(context).values()]}
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
@@ -280,7 +284,7 @@ class AdminController(object):
", ensure it isn't running, and try "
"again in a few minutes")
instance = self._vpn_for(context, project)
- return {'instance_id': instance['ec2_id']}
+ return {'instance_id': ec2utils.id_to_ec2_id(instance['id'])}
def describe_vpns(self, context):
vpns = []
@@ -300,7 +304,7 @@ class AdminController(object):
* Volume (up, down, None)
* Volume Count
"""
- services = db.service_get_all(context)
+ services = db.service_get_all(context, False)
now = datetime.datetime.utcnow()
hosts = []
rv = []
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 2b1acba5a..d7ad08d2f 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -52,7 +52,23 @@ def _database_to_isoformat(datetimeobj):
def _try_convert(value):
- """Return a non-string if possible"""
+ """Return a non-string from a string or unicode, if possible.
+
+ ============= =====================================================
+ When value is returns
+ ============= =====================================================
+ zero-length ''
+ 'None' None
+ 'True' True
+ 'False' False
+ '0', '-0' 0
+ 0xN, -0xN int from hex (postitive) (N is any number)
+ 0bN, -0bN int from binary (positive) (N is any number)
+ * try conversion to int, float, complex, fallback value
+
+ """
+ if len(value) == 0:
+ return ''
if value == 'None':
return None
if value == 'True':
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 7458d307a..99520b302 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -39,7 +39,9 @@ from nova import log as logging
from nova import network
from nova import utils
from nova import volume
+from nova.api.ec2 import ec2utils
from nova.compute import instance_types
+from nova.image import s3
FLAGS = flags.FLAGS
@@ -73,30 +75,19 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
-def ec2_id_to_id(ec2_id):
- """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
- return int(ec2_id.split('-')[-1], 16)
-
-
-def id_to_ec2_id(instance_id, template='i-%08x'):
- """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
- return template % instance_id
-
-
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
- self.image_service = utils.import_object(FLAGS.image_service)
+ self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.compute_api = compute.API(
network_api=self.network_api,
- image_service=self.image_service,
volume_api=self.volume_api,
- hostname_factory=id_to_ec2_id)
+ hostname_factory=ec2utils.id_to_ec2_id)
self.setup()
def __str__(self):
@@ -112,10 +103,17 @@ class CloudController(object):
# Gen root CA, if we don't have one
root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
if not os.path.exists(root_ca_path):
+ genrootca_sh_path = os.path.join(os.path.dirname(__file__),
+ os.path.pardir,
+ os.path.pardir,
+ 'CA',
+ 'genrootca.sh')
+
start = os.getcwd()
+ os.makedirs(FLAGS.ca_path)
os.chdir(FLAGS.ca_path)
# TODO(vish): Do this with M2Crypto instead
- utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh")
+ utils.runthis(_("Generating root CA: %s"), "sh", genrootca_sh_path)
os.chdir(start)
def _get_mpi_data(self, context, project_id):
@@ -154,11 +152,12 @@ class CloudController(object):
availability_zone = self._get_availability_zone_by_host(ctxt, host)
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
- ec2_id = id_to_ec2_id(instance_ref['id'])
+ ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
+ image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine')
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
- 'ami-id': instance_ref['image_id'],
+ 'ami-id': image_ec2_id,
'ami-launch-index': instance_ref['launch_index'],
'ami-manifest-path': 'FIXME',
'block-device-mapping': {
@@ -173,15 +172,20 @@ class CloudController(object):
'instance-type': instance_ref['instance_type'],
'local-hostname': hostname,
'local-ipv4': address,
- 'kernel-id': instance_ref['kernel_id'],
'placement': {'availability-zone': availability_zone},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
- 'ramdisk-id': instance_ref['ramdisk_id'],
'reservation-id': instance_ref['reservation_id'],
'security-groups': '',
'mpi': mpi}}
+
+ for image_type in ['kernel', 'ramdisk']:
+ if '%s_id' % image_type in instance_ref:
+ ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type],
+ image_type)
+ data['meta-data']['%s-id' % image_type] = ec2_id
+
if False: # TODO(vish): store ancestor ids
data['ancestor-ami-ids'] = []
if False: # TODO(vish): store product codes
@@ -199,7 +203,7 @@ class CloudController(object):
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
- enabled_services = db.service_get_all(ctxt)
+ enabled_services = db.service_get_all(ctxt, False)
disabled_services = db.service_get_all(ctxt, True)
available_zones = []
for zone in [service.availability_zone for service
@@ -224,7 +228,7 @@ class CloudController(object):
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
- services = db.service_get_all(context)
+ services = db.service_get_all(context, False)
now = datetime.datetime.utcnow()
hosts = []
for host in [service['host'] for service in services]:
@@ -298,7 +302,7 @@ class CloudController(object):
'keyFingerprint': key_pair['fingerprint'],
})
- return {'keypairsSet': result}
+ return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
@@ -525,7 +529,7 @@ class CloudController(object):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
now = datetime.datetime.utcnow()
@@ -535,16 +539,23 @@ class CloudController(object):
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
return self.compute_api.get_ajax_console(context,
instance_id=instance_id)
+ def get_vnc_console(self, context, instance_id, **kwargs):
+ """Returns vnc browser url. Used by OS dashboard."""
+ ec2_id = instance_id
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ return self.compute_api.get_vnc_console(context,
+ instance_id=instance_id)
+
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
- internal_id = ec2_id_to_id(ec2_id)
- volume = self.volume_api.get(context, internal_id)
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
+ volume = self.volume_api.get(context, volume_id=internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
@@ -556,11 +567,11 @@ class CloudController(object):
instance_data = None
if volume.get('instance', None):
instance_id = volume['instance']['id']
- instance_ec2_id = id_to_ec2_id(instance_id)
+ instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
instance_data = '%s[%s]' % (instance_ec2_id,
volume['instance']['host'])
v = {}
- v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x')
+ v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x')
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
@@ -568,7 +579,7 @@ class CloudController(object):
if context.is_admin:
v['status'] = '%s (%s, %s, %s, %s)' % (
volume['status'],
- volume['user_id'],
+ volume['project_id'],
volume['host'],
instance_data,
volume['mountpoint'])
@@ -578,8 +589,7 @@ class CloudController(object):
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
- 'volumeId': id_to_ec2_id(volume['id'],
- 'vol-%08x')}]
+ 'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
@@ -589,33 +599,37 @@ class CloudController(object):
def create_volume(self, context, size, **kwargs):
LOG.audit(_("Create volume of %s GB"), size, context=context)
- volume = self.volume_api.create(context, size,
- kwargs.get('display_name'),
- kwargs.get('display_description'))
+ volume = self.volume_api.create(
+ context,
+ size=size,
+ name=kwargs.get('display_name'),
+ description=kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return {'volumeSet': [self._format_volume(context, dict(volume))]}
def delete_volume(self, context, volume_id, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
self.volume_api.delete(context, volume_id=volume_id)
return True
def update_volume(self, context, volume_id, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- self.volume_api.update(context, volume_id, kwargs)
+ self.volume_api.update(context,
+ volume_id=volume_id,
+ fields=changes)
return True
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
- instance_id = ec2_id_to_id(instance_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
@@ -623,25 +637,25 @@ class CloudController(object):
instance_id=instance_id,
volume_id=volume_id,
device=device)
- volume = self.volume_api.get(context, volume_id)
+ volume = self.volume_api.get(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
- 'instanceId': id_to_ec2_id(instance_id),
+ 'instanceId': ec2utils.id_to_ec2_id(instance_id),
'requestId': context.request_id,
'status': volume['attach_status'],
- 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
+ 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
def detach_volume(self, context, volume_id, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
- volume = self.volume_api.get(context, volume_id)
+ volume = self.volume_api.get(context, volume_id=volume_id)
instance = self.compute_api.detach_volume(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
- 'instanceId': id_to_ec2_id(instance['id']),
+ 'instanceId': ec2utils.id_to_ec2_id(instance['id']),
'requestId': context.request_id,
'status': volume['attach_status'],
- 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
+ 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -675,7 +689,7 @@ class CloudController(object):
if instance_id:
instances = []
for ec2_id in instance_id:
- internal_id = ec2_id_to_id(ec2_id)
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
instance = self.compute_api.get(context,
instance_id=internal_id)
instances.append(instance)
@@ -687,9 +701,9 @@ class CloudController(object):
continue
i = {}
instance_id = instance['id']
- ec2_id = id_to_ec2_id(instance_id)
+ ec2_id = ec2utils.id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
- i['imageId'] = instance['image_id']
+ i['imageId'] = self._image_ec2_id(instance['image_id'])
i['instanceState'] = {
'code': instance['state'],
'name': instance['state_description']}
@@ -750,12 +764,14 @@ class CloudController(object):
iterator = db.floating_ip_get_all_by_project(context,
context.project_id)
for floating_ip_ref in iterator:
+ if floating_ip_ref['project_id'] is None:
+ continue
address = floating_ip_ref['address']
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
instance_id = floating_ip_ref['fixed_ip']['instance']['id']
- ec2_id = id_to_ec2_id(instance_id)
+ ec2_id = ec2utils.id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
if context.is_admin:
@@ -772,13 +788,13 @@ class CloudController(object):
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_("Release address %s"), public_ip, context=context)
- self.network_api.release_floating_ip(context, public_ip)
+ self.network_api.release_floating_ip(context, address=public_ip)
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to"
" instance %(instance_id)s") % locals(), context=context)
- instance_id = ec2_id_to_id(instance_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
address=public_ip)
@@ -786,18 +802,24 @@ class CloudController(object):
def disassociate_address(self, context, public_ip, **kwargs):
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
- self.network_api.disassociate_floating_ip(context, public_ip)
+ self.network_api.disassociate_floating_ip(context, address=public_ip)
return {'disassociateResponse': ["Address disassociated."]}
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
+ if kwargs.get('kernel_id'):
+ kernel = self._get_image(context, kwargs['kernel_id'])
+ kwargs['kernel_id'] = kernel['id']
+ if kwargs.get('ramdisk_id'):
+ ramdisk = self._get_image(context, kwargs['ramdisk_id'])
+ kwargs['ramdisk_id'] = ramdisk['id']
instances = self.compute_api.create(context,
instance_type=instance_types.get_by_type(
kwargs.get('instance_type', None)),
- image_id=kwargs['image_id'],
+ image_id=self._get_image(context, kwargs['image_id'])['id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
- kernel_id=kwargs.get('kernel_id', None),
+ kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
display_name=kwargs.get('display_name'),
display_description=kwargs.get('display_description'),
@@ -814,7 +836,7 @@ class CloudController(object):
instance_id is a kwarg so its name cannot be modified."""
LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.delete(context, instance_id=instance_id)
return True
@@ -822,64 +844,106 @@ class CloudController(object):
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.reboot(context, instance_id=instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2_id_to_id(instance_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.rescue(context, instance_id=instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2_id_to_id(instance_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.unrescue(context, instance_id=instance_id)
return True
- def update_instance(self, context, ec2_id, **kwargs):
+ def update_instance(self, context, instance_id, **kwargs):
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
- def _format_image(self, context, image):
+ _type_prefix_map = {'machine': 'ami',
+ 'kernel': 'aki',
+ 'ramdisk': 'ari'}
+
+ def _image_ec2_id(self, image_id, image_type='machine'):
+ prefix = self._type_prefix_map[image_type]
+ template = prefix + '-%08x'
+ return ec2utils.id_to_ec2_id(int(image_id), template=template)
+
+ def _get_image(self, context, ec2_id):
+ try:
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
+ return self.image_service.show(context, internal_id)
+ except exception.NotFound:
+ return self.image_service.show_by_name(context, ec2_id)
+
+ def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
i = {}
- i['imageId'] = image.get('id')
- i['kernelId'] = image.get('kernel_id')
- i['ramdiskId'] = image.get('ramdisk_id')
- i['imageOwnerId'] = image.get('owner_id')
- i['imageLocation'] = image.get('location')
- i['imageState'] = image.get('status')
- i['type'] = image.get('type')
- i['isPublic'] = image.get('is_public')
- i['architecture'] = image.get('architecture')
+ image_type = image['properties'].get('type')
+ ec2_id = self._image_ec2_id(image.get('id'), image_type)
+ name = image.get('name')
+ i['imageId'] = ec2_id
+ kernel_id = image['properties'].get('kernel_id')
+ if kernel_id:
+ i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel')
+ ramdisk_id = image['properties'].get('ramdisk_id')
+ if ramdisk_id:
+ i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk')
+ i['imageOwnerId'] = image['properties'].get('owner_id')
+ if name:
+ i['imageLocation'] = "%s (%s)" % (image['properties'].
+ get('image_location'), name)
+ else:
+ i['imageLocation'] = image['properties'].get('image_location')
+ i['imageState'] = image['properties'].get('image_state')
+ i['displayName'] = name
+ i['description'] = image.get('description')
+ i['imageType'] = image_type
+ i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True'
+ i['architecture'] = image['properties'].get('architecture')
return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
- images = self.image_service.index(context)
if image_id:
- images = filter(lambda x: x['id'] in image_id, images)
- images = [self._format_image(context, i) for i in images]
+ images = []
+ for ec2_id in image_id:
+ try:
+ image = self._get_image(context, ec2_id)
+ except exception.NotFound:
+ raise exception.NotFound(_('Image %s not found') %
+ ec2_id)
+ images.append(image)
+ else:
+ images = self.image_service.detail(context)
+ images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
- self.image_service.deregister(context, image_id)
+ image = self._get_image(context, image_id)
+ internal_id = image['id']
+ self.image_service.delete(context, internal_id)
return {'imageId': image_id}
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
- image_id = self.image_service.register(context, image_location)
+ metadata = {'properties': {'image_location': image_location}}
+ image = self.image_service.create(context, metadata)
+ image_id = self._image_ec2_id(image['id'],
+ image['properties']['type'])
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
@@ -890,14 +954,11 @@ class CloudController(object):
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
- image = self.image_service.show(context, image_id)
- image = self._format_image(context,
- self.image_service.show(context,
- image_id))
- except IndexError:
- raise exception.ApiError(_('invalid id: %s') % image_id)
- result = {'image_id': image_id, 'launchPermission': []}
- if image['isPublic']:
+ image = self._get_image(context, image_id)
+ except exception.NotFound:
+ raise exception.NotFound(_('Image %s not found') % image_id)
+ result = {'imageId': image_id, 'launchPermission': []}
+ if image['properties']['is_public']:
result['launchPermission'].append({'group': 'all'})
return result
@@ -914,8 +975,18 @@ class CloudController(object):
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
- return self.image_service.modify(context, image_id, operation_type)
+
+ try:
+ image = self._get_image(context, image_id)
+ except exception.NotFound:
+ raise exception.NotFound(_('Image %s not found') % image_id)
+ internal_id = image['id']
+ del(image['id'])
+
+ image['properties']['is_public'] = (operation_type == 'add')
+ return self.image_service.update(context, internal_id, image)
def update_image(self, context, image_id, **kwargs):
- result = self.image_service.update(context, image_id, dict(kwargs))
+ internal_id = ec2utils.ec2_id_to_id(image_id)
+ result = self.image_service.update(context, internal_id, dict(kwargs))
return result
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
new file mode 100644
index 000000000..3b34f6ea5
--- /dev/null
+++ b/nova/api/ec2/ec2utils.py
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+
+
+def ec2_id_to_id(ec2_id):
+ """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
+ try:
+ return int(ec2_id.split('-')[-1], 16)
+ except ValueError:
+ raise exception.NotFound(_("Id %s Not Found") % ec2_id)
+
+
+def id_to_ec2_id(instance_id, template='i-%08x'):
+ """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
+ return template % instance_id
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index 6fb441656..28f99b0ef 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -65,7 +65,7 @@ class MetadataRequestHandler(wsgi.Application):
data = data[item]
return data
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
cc = cloud.CloudController()
remote_address = req.remote_addr
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index b1b38ed2d..7545eb0c9 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -27,13 +27,18 @@ import webob.exc
from nova import flags
from nova import log as logging
from nova import wsgi
+from nova.api.openstack import accounts
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
+from nova.api.openstack import image_metadata
+from nova.api.openstack import limits
from nova.api.openstack import servers
+from nova.api.openstack import server_metadata
from nova.api.openstack import shared_ip_groups
+from nova.api.openstack import users
from nova.api.openstack import zones
@@ -47,7 +52,7 @@ flags.DEFINE_bool('allow_admin_api',
class FaultWrapper(wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
@@ -68,57 +73,102 @@ class APIRouter(wsgi.Router):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one"""
return cls()
- def __init__(self):
+ def __init__(self, ext_mgr=None):
+ self.server_members = {}
mapper = routes.Mapper()
+ self._setup_routes(mapper)
+ super(APIRouter, self).__init__(mapper)
- server_members = {'action': 'POST'}
+ def _setup_routes(self, mapper):
+ server_members = self.server_members
+ server_members['action'] = 'POST'
if FLAGS.allow_admin_api:
LOG.debug(_("Including admin operations in API."))
+
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
- server_members["diagnostics"] = "GET"
- server_members["actions"] = "GET"
+ server_members['diagnostics'] = 'GET'
+ server_members['actions'] = 'GET'
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
+ server_members['rescue'] = 'POST'
+ server_members['unrescue'] = 'POST'
server_members['reset_network'] = 'POST'
server_members['inject_network_info'] = 'POST'
mapper.resource("zone", "zones", controller=zones.Controller(),
- collection={'detail': 'GET'})
+ collection={'detail': 'GET', 'info': 'GET'}),
- mapper.resource("server", "servers", controller=servers.Controller(),
- collection={'detail': 'GET'},
- member=server_members)
+ mapper.resource("user", "users", controller=users.Controller(),
+ collection={'detail': 'GET'})
- mapper.resource("backup_schedule", "backup_schedule",
- controller=backup_schedules.Controller(),
- parent_resource=dict(member_name='server',
- collection_name='servers'))
+ mapper.resource("account", "accounts",
+ controller=accounts.Controller(),
+ collection={'detail': 'GET'})
mapper.resource("console", "consoles",
controller=consoles.Controller(),
parent_resource=dict(member_name='server',
collection_name='servers'))
- mapper.resource("image", "images", controller=images.Controller(),
+ _limits = limits.LimitsController()
+ mapper.resource("limit", "limits", controller=_limits)
+
+ super(APIRouter, self).__init__(mapper)
+
+
+class APIRouterV10(APIRouter):
+ """Define routes specific to OpenStack API V1.0."""
+
+ def _setup_routes(self, mapper):
+ super(APIRouterV10, self)._setup_routes(mapper)
+ mapper.resource("server", "servers",
+ controller=servers.ControllerV10(),
+ collection={'detail': 'GET'},
+ member=self.server_members)
+
+ mapper.resource("image", "images",
+ controller=images.ControllerV10(),
collection={'detail': 'GET'})
- mapper.resource("flavor", "flavors", controller=flavors.Controller(),
+
+ mapper.resource("flavor", "flavors",
+ controller=flavors.ControllerV10(),
collection={'detail': 'GET'})
+
mapper.resource("shared_ip_group", "shared_ip_groups",
collection={'detail': 'GET'},
controller=shared_ip_groups.Controller())
- super(APIRouter, self).__init__(mapper)
+ mapper.resource("backup_schedule", "backup_schedule",
+ controller=backup_schedules.Controller(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
-class Versions(wsgi.Application):
- @webob.dec.wsgify
- def __call__(self, req):
- """Respond to a request for all OpenStack API versions."""
- response = {
- "versions": [
- dict(status="CURRENT", id="v1.0")]}
- metadata = {
- "application/xml": {
- "attributes": dict(version=["status", "id"])}}
- return wsgi.Serializer(req.environ, metadata).to_content_type(response)
+class APIRouterV11(APIRouter):
+ """Define routes specific to OpenStack API V1.1."""
+
+ def _setup_routes(self, mapper):
+ super(APIRouterV11, self)._setup_routes(mapper)
+ mapper.resource("server", "servers",
+ controller=servers.ControllerV11(),
+ collection={'detail': 'GET'},
+ member=self.server_members)
+
+ mapper.resource("image", "images",
+ controller=images.ControllerV11(),
+ collection={'detail': 'GET'})
+
+ mapper.resource("image_meta", "meta",
+ controller=image_metadata.Controller(),
+ parent_resource=dict(member_name='image',
+ collection_name='images'))
+
+ mapper.resource("server_meta", "meta",
+ controller=server_metadata.Controller(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
+
+ mapper.resource("flavor", "flavors",
+ controller=flavors.ControllerV11(),
+ collection={'detail': 'GET'})
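
The router split above keeps the shared admin member actions in APIRouter._setup_routes and lets each versioned subclass wire its own controllers. A standalone sketch of what mapper.resource sets up, using only the routes library (the placeholder controller name and sample URLs are illustrative, not nova code):

    import routes

    mapper = routes.Mapper()
    server_members = {'action': 'POST', 'pause': 'POST', 'diagnostics': 'GET'}
    mapper.resource("server", "servers",
                    controller="servers",            # placeholder name
                    collection={'detail': 'GET'},
                    member=server_members)

    # Collection action, member action and plain show, as matched by routes:
    print(mapper.match('/servers/detail', {'REQUEST_METHOD': 'GET'}))
    print(mapper.match('/servers/1/pause', {'REQUEST_METHOD': 'POST'}))
    print(mapper.match('/servers/1', {'REQUEST_METHOD': 'GET'}))
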
diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py
new file mode 100644
index 000000000..86066fa20
--- /dev/null
+++ b/nova/api/openstack/accounts.py
@@ -0,0 +1,86 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import common
+import webob.exc
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+
+from nova.auth import manager
+from nova.api.openstack import faults
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.api.openstack')
+
+
+def _translate_keys(account):
+ return dict(id=account.id,
+ name=account.name,
+ description=account.description,
+ manager=account.project_manager_id)
+
+
+class Controller(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "account": ["id", "name", "description", "manager"]}}}
+
+ def __init__(self):
+ self.manager = manager.AuthManager()
+
+ def _check_admin(self, context):
+ """We cannot depend on the db layer to check for admin access
+ for the auth manager, so we do it here"""
+ if not context.is_admin:
+ raise exception.NotAuthorized(_("Not admin user."))
+
+ def index(self, req):
+ raise faults.Fault(webob.exc.HTTPNotImplemented())
+
+ def detail(self, req):
+ raise faults.Fault(webob.exc.HTTPNotImplemented())
+
+ def show(self, req, id):
+ """Return data about the given account id"""
+ account = self.manager.get_project(id)
+ return dict(account=_translate_keys(account))
+
+ def delete(self, req, id):
+ self._check_admin(req.environ['nova.context'])
+ self.manager.delete_project(id)
+ return {}
+
+ def create(self, req):
+ """We use update with create-or-update semantics
+ because the id comes from an external source"""
+ raise faults.Fault(webob.exc.HTTPNotImplemented())
+
+ def update(self, req, id):
+ """This is really create or update."""
+ self._check_admin(req.environ['nova.context'])
+ env = self._deserialize(req.body, req.get_content_type())
+ description = env['account'].get('description')
+ manager = env['account'].get('manager')
+ try:
+ account = self.manager.get_project(id)
+ self.manager.modify_project(id, manager, description)
+ except exception.NotFound:
+ account = self.manager.create_project(id, manager, description)
+ return dict(account=_translate_keys(account))
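
Controller.update() above implements create-or-update keyed by the account id taken from the URL, so a client PUT only carries the mutable fields. A sketch of the body it deserializes (the description and manager values are made up):

    import json

    # PUT /accounts/<id> -- parsed by Controller.update() into env['account']
    account_body = {
        "account": {
            "description": "engineering sandbox project",
            "manager": "user-42",      # becomes the project manager id
        },
    }
    print(json.dumps(account_body))
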
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 1dfdd5318..f3a9bdeca 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -26,12 +26,15 @@ import webob.dec
from nova import auth
from nova import context
from nova import db
+from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults
+LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
@@ -45,18 +48,27 @@ class AuthMiddleware(wsgi.Middleware):
self.auth = auth.manager.AuthManager()
super(AuthMiddleware, self).__init__(application)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if not self.has_authentication(req):
return self.authenticate(req)
-
user = self.get_user_by_authentication(req)
-
+ accounts = self.auth.get_projects(user=user)
if not user:
return faults.Fault(webob.exc.HTTPUnauthorized())
- project = self.auth.get_project(FLAGS.default_project)
- req.environ['nova.context'] = context.RequestContext(user, project)
+ if accounts:
+            # We are punting on this until auth is settled,
+            # and possibly until API v1.1 (mdragon).
+ account = accounts[0]
+ else:
+ return faults.Fault(webob.exc.HTTPUnauthorized())
+
+ if not self.auth.is_admin(user) and \
+ not self.auth.is_project_member(user, account):
+ return faults.Fault(webob.exc.HTTPUnauthorized())
+
+ req.environ['nova.context'] = context.RequestContext(user, account)
return self.application
def has_authentication(self, req):
@@ -103,11 +115,14 @@ class AuthMiddleware(wsgi.Middleware):
2 days ago.
"""
ctxt = context.get_admin_context()
- token = self.db.auth_get_token(ctxt, token_hash)
+ try:
+ token = self.db.auth_token_get(ctxt, token_hash)
+ except exception.NotFound:
+ return None
if token:
delta = datetime.datetime.now() - token.created_at
if delta.days >= 2:
- self.db.auth_destroy_token(ctxt, token)
+ self.db.auth_token_destroy(ctxt, token.token_hash)
else:
return self.auth.get_user(token.user_id)
return None
@@ -117,20 +132,25 @@ class AuthMiddleware(wsgi.Middleware):
username - string
key - string API key
- req - webob.Request object
+ req - wsgi.Request object
"""
ctxt = context.get_admin_context()
- user = self.auth.get_user_from_access_key(key)
+
+ try:
+ user = self.auth.get_user_from_access_key(key)
+ except exception.NotFound:
+ user = None
+
if user and user.name == username:
token_hash = hashlib.sha1('%s%s%f' % (username, key,
time.time())).hexdigest()
token_dict = {}
token_dict['token_hash'] = token_hash
token_dict['cdn_management_url'] = ''
- # Same as auth url, e.g. http://foo.org:8774/baz/v1.0
- token_dict['server_management_url'] = req.url
+ os_url = req.url
+ token_dict['server_management_url'] = os_url
token_dict['storage_url'] = ''
token_dict['user_id'] = user.id
- token = self.db.auth_create_token(ctxt, token_dict)
+ token = self.db.auth_token_create(ctxt, token_dict)
return token, user
return None, None
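
get_user_by_authentication() above now tolerates missing tokens and still enforces the two-day lifetime before handing back a user. The expiry rule in isolation, as a small sketch:

    import datetime

    def token_is_expired(created_at, now=None):
        """Mirror the delta.days >= 2 check used by AuthMiddleware."""
        now = now or datetime.datetime.now()
        return (now - created_at).days >= 2

    print(token_is_expired(datetime.datetime.now()))                          # False
    print(token_is_expired(datetime.datetime.now() - datetime.timedelta(3)))  # True
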
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index 7abb5f884..f2d2d86e8 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -42,7 +42,11 @@ class Controller(wsgi.Controller):
def index(self, req, server_id):
""" Returns the list of backup schedules for a given instance """
- return _translate_keys({})
+ return faults.Fault(exc.HTTPNotImplemented())
+
+ def show(self, req, server_id, id):
+ """ Returns a single backup schedule for a given instance """
+ return faults.Fault(exc.HTTPNotImplemented())
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 1dc3767e2..75aeb0a5f 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,36 +15,85 @@
# License for the specific language governing permissions and limitations
# under the License.
+from urlparse import urlparse
+
+import webob
+
from nova import exception
+from nova import flags
+from nova import log as logging
+
+
+LOG = logging.getLogger('common')
-def limited(items, request, max_limit=1000):
+FLAGS = flags.FLAGS
+
+
+def limited(items, request, max_limit=FLAGS.osapi_max_limit):
"""
Return a slice of items according to requested offset and limit.
@param items: A sliceable entity
- @param request: `webob.Request` possibly containing 'offset' and 'limit'
+ @param request: `wsgi.Request` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
- to max_limit.
+ to max_limit. Negative values for either offset or limit
+ will cause exc.HTTPBadRequest() exceptions to be raised.
@kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
- offset = 0
+ raise webob.exc.HTTPBadRequest(_('offset param must be an integer'))
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
- limit = max_limit
+ raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
+
+ if limit < 0:
+ raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+
+ if offset < 0:
+ raise webob.exc.HTTPBadRequest(_('offset param must be positive'))
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
+def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
+ """Return a slice of items according to the requested marker and limit."""
+
+ try:
+ marker = int(request.GET.get('marker', 0))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
+
+ try:
+ limit = int(request.GET.get('limit', max_limit))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
+
+ if limit < 0:
+ raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+
+ limit = min(max_limit, limit)
+ start_index = 0
+ if marker:
+ start_index = -1
+ for i, item in enumerate(items):
+ if item['id'] == marker:
+ start_index = i + 1
+ break
+ if start_index < 0:
+        raise webob.exc.HTTPBadRequest(_('marker [%s] not found') % marker)
+ range_end = start_index + limit
+ return items[start_index:range_end]
+
+
def get_image_id_from_image_hash(image_service, context, image_hash):
"""Given an Image ID Hash, return an objectstore Image ID.
@@ -65,3 +114,17 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
if abs(hash(image_id)) == int(image_hash):
return image_id
raise exception.NotFound(image_hash)
+
+
+def get_id_from_href(href):
+ """Return the id portion of a url as an int.
+
+ Given: http://www.foo.com/bar/123?q=4
+ Returns: 123
+
+ """
+ try:
+ return int(urlparse(href).path.split('/')[-1])
+    except Exception:
+ LOG.debug(_("Error extracting id from href: %s") % href)
+ raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
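
A quick sketch of the two paginators added above, assuming the nova tree is importable; the fake records and the explicit max_limit are only there to keep the example self-contained:

    import webob

    from nova.api.openstack import common

    items = [{'id': i} for i in range(1, 6)]          # five fake records

    req = webob.Request.blank('/servers?offset=1&limit=2')
    print(common.limited(items, req, max_limit=100))
    # [{'id': 2}, {'id': 3}]

    req = webob.Request.blank('/servers?marker=3&limit=2')
    print(common.limited_by_marker(items, req, max_limit=100))
    # [{'id': 4}, {'id': 5}]  -- the slice starts just after the marker id
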
diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py
index 9ebdbe710..8c291c2eb 100644
--- a/nova/api/openstack/consoles.py
+++ b/nova/api/openstack/consoles.py
@@ -65,7 +65,7 @@ class Controller(wsgi.Controller):
def create(self, req, server_id):
"""Creates a new console"""
- #info = self._deserialize(req.body, req)
+ #info = self._deserialize(req.body, req.get_content_type())
self.console_api.create_console(
req.environ['nova.context'],
int(server_id))
diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py
new file mode 100644
index 000000000..b42a1d89d
--- /dev/null
+++ b/nova/api/openstack/contrib/__init__.py
@@ -0,0 +1,22 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Contrib contains extensions that are shipped with nova.
+
+It can't be called 'extensions' because that causes namespacing problems.
+
+"""
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
new file mode 100644
index 000000000..6efacce52
--- /dev/null
+++ b/nova/api/openstack/contrib/volumes.py
@@ -0,0 +1,336 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volumes extension."""
+
+from webob import exc
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import volume
+from nova import wsgi
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+
+
+LOG = logging.getLogger("nova.api.volumes")
+
+
+FLAGS = flags.FLAGS
+
+
+def _translate_volume_detail_view(context, vol):
+ """Maps keys for volumes details view."""
+
+ d = _translate_volume_summary_view(context, vol)
+
+ # No additional data / lookups at the moment
+
+ return d
+
+
+def _translate_volume_summary_view(context, vol):
+ """Maps keys for volumes summary view."""
+ d = {}
+
+ d['id'] = vol['id']
+ d['status'] = vol['status']
+ d['size'] = vol['size']
+ d['availabilityZone'] = vol['availability_zone']
+ d['createdAt'] = vol['created_at']
+
+ if vol['attach_status'] == 'attached':
+ d['attachments'] = [_translate_attachment_detail_view(context, vol)]
+ else:
+ d['attachments'] = [{}]
+
+ d['displayName'] = vol['display_name']
+ d['displayDescription'] = vol['display_description']
+ return d
+
+
+class VolumeController(wsgi.Controller):
+ """The Volumes API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "volume": [
+ "id",
+ "status",
+ "size",
+ "availabilityZone",
+ "createdAt",
+ "displayName",
+ "displayDescription",
+ ]}}}
+
+ def __init__(self):
+ self.volume_api = volume.API()
+ super(VolumeController, self).__init__()
+
+ def show(self, req, id):
+ """Return data about the given volume."""
+ context = req.environ['nova.context']
+
+ try:
+ vol = self.volume_api.get(context, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volume': _translate_volume_detail_view(context, vol)}
+
+ def delete(self, req, id):
+ """Delete a volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete volume with id: %s"), id, context=context)
+
+ try:
+ self.volume_api.delete(context, volume_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ def index(self, req):
+ """Returns a summary list of volumes."""
+ return self._items(req, entity_maker=_translate_volume_summary_view)
+
+ def detail(self, req):
+ """Returns a detailed list of volumes."""
+ return self._items(req, entity_maker=_translate_volume_detail_view)
+
+ def _items(self, req, entity_maker):
+ """Returns a list of volumes, transformed through entity_maker."""
+ context = req.environ['nova.context']
+
+ volumes = self.volume_api.get_all(context)
+ limited_list = common.limited(volumes, req)
+ res = [entity_maker(context, vol) for vol in limited_list]
+ return {'volumes': res}
+
+ def create(self, req):
+ """Creates a new volume."""
+ context = req.environ['nova.context']
+
+ env = self._deserialize(req.body, req.get_content_type())
+ if not env:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol = env['volume']
+ size = vol['size']
+ LOG.audit(_("Create volume of %s GB"), size, context=context)
+ new_volume = self.volume_api.create(context, size,
+ vol.get('display_name'),
+ vol.get('display_description'))
+
+ # Work around problem that instance is lazy-loaded...
+ new_volume['instance'] = None
+
+ retval = _translate_volume_detail_view(context, new_volume)
+
+ return {'volume': retval}
+
+
+def _translate_attachment_detail_view(_context, vol):
+ """Maps keys for attachment details view."""
+
+ d = _translate_attachment_summary_view(_context, vol)
+
+ # No additional data / lookups at the moment
+
+ return d
+
+
+def _translate_attachment_summary_view(_context, vol):
+ """Maps keys for attachment summary view."""
+ d = {}
+
+ volume_id = vol['id']
+
+ # NOTE(justinsb): We use the volume id as the id of the attachment object
+ d['id'] = volume_id
+
+ d['volumeId'] = volume_id
+ if vol.get('instance_id'):
+ d['serverId'] = vol['instance_id']
+ if vol.get('mountpoint'):
+ d['device'] = vol['mountpoint']
+
+ return d
+
+
+class VolumeAttachmentController(wsgi.Controller):
+    """The volume attachment API controller for the OpenStack API.
+
+ A child resource of the server. Note that we use the volume id
+ as the ID of the attachment (though this is not guaranteed externally)
+
+ """
+
+ _serialization_metadata = {
+ 'application/xml': {
+ 'attributes': {
+ 'volumeAttachment': ['id',
+ 'serverId',
+ 'volumeId',
+ 'device']}}}
+
+ def __init__(self):
+ self.compute_api = compute.API()
+ self.volume_api = volume.API()
+ super(VolumeAttachmentController, self).__init__()
+
+ def index(self, req, server_id):
+ """Returns the list of volume attachments for a given instance."""
+ return self._items(req, server_id,
+ entity_maker=_translate_attachment_summary_view)
+
+ def show(self, req, server_id, id):
+ """Return data about the given volume attachment."""
+ context = req.environ['nova.context']
+
+ volume_id = id
+ try:
+ vol = self.volume_api.get(context, volume_id)
+ except exception.NotFound:
+ LOG.debug("volume_id not found")
+ return faults.Fault(exc.HTTPNotFound())
+
+ if str(vol['instance_id']) != server_id:
+ LOG.debug("instance_id != server_id")
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volumeAttachment': _translate_attachment_detail_view(context,
+ vol)}
+
+ def create(self, req, server_id):
+ """Attach a volume to an instance."""
+ context = req.environ['nova.context']
+
+ env = self._deserialize(req.body, req.get_content_type())
+ if not env:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ instance_id = server_id
+ volume_id = env['volumeAttachment']['volumeId']
+ device = env['volumeAttachment']['device']
+
+ msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
+ " at %(device)s") % locals()
+ LOG.audit(msg, context=context)
+
+ try:
+ self.compute_api.attach_volume(context,
+ instance_id=instance_id,
+ volume_id=volume_id,
+ device=device)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ # The attach is async
+ attachment = {}
+ attachment['id'] = volume_id
+ attachment['volumeId'] = volume_id
+
+ # NOTE(justinsb): And now, we have a problem...
+ # The attach is async, so there's a window in which we don't see
+ # the attachment (until the attachment completes). We could also
+ # get problems with concurrent requests. I think we need an
+ # attachment state, and to write to the DB here, but that's a bigger
+ # change.
+ # For now, we'll probably have to rely on libraries being smart
+
+ # TODO(justinsb): How do I return "accepted" here?
+ return {'volumeAttachment': attachment}
+
+ def update(self, _req, _server_id, _id):
+ """Update a volume attachment. We don't currently support this."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, server_id, id):
+ """Detach a volume from an instance."""
+ context = req.environ['nova.context']
+
+ volume_id = id
+ LOG.audit(_("Detach volume %s"), volume_id, context=context)
+
+ try:
+ vol = self.volume_api.get(context, volume_id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ if str(vol['instance_id']) != server_id:
+ LOG.debug("instance_id != server_id")
+ return faults.Fault(exc.HTTPNotFound())
+
+ self.compute_api.detach_volume(context,
+ volume_id=volume_id)
+
+ return exc.HTTPAccepted()
+
+ def _items(self, req, server_id, entity_maker):
+ """Returns a list of attachments, transformed through entity_maker."""
+ context = req.environ['nova.context']
+
+ try:
+ instance = self.compute_api.get(context, server_id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ volumes = instance['volumes']
+ limited_list = common.limited(volumes, req)
+ res = [entity_maker(context, vol) for vol in limited_list]
+ return {'volumeAttachments': res}
+
+
+class Volumes(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "Volumes"
+
+ def get_alias(self):
+ return "VOLUMES"
+
+ def get_description(self):
+ return "Volumes support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/volumes/api/v1.1"
+
+ def get_updated(self):
+ return "2011-03-25T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ # NOTE(justinsb): No way to provide singular name ('volume')
+ # Does this matter?
+ res = extensions.ResourceExtension('volumes',
+ VolumeController(),
+ collection_actions={'detail': 'GET'}
+ )
+ resources.append(res)
+
+ res = extensions.ResourceExtension('volume_attachments',
+ VolumeAttachmentController(),
+ parent=dict(
+ member_name='server',
+ collection_name='servers'))
+ resources.append(res)
+
+ return resources
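
The extension above adds /volumes plus /servers/<server_id>/volume_attachments. Sketches of the request bodies its create() methods deserialize (sizes, names and device paths are illustrative):

    import json

    # POST /volumes -- consumed by VolumeController.create()
    volume_body = {
        "volume": {
            "size": 10,
            "display_name": "scratch",
            "display_description": "scratch space for instance 12",
        },
    }

    # POST /servers/12/volume_attachments -- consumed by
    # VolumeAttachmentController.create()
    attach_body = {
        "volumeAttachment": {
            "volumeId": 1,
            "device": "/dev/vdb",
        },
    }

    print(json.dumps(volume_body))
    print(json.dumps(attach_body))
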
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
new file mode 100644
index 000000000..fb1dccb28
--- /dev/null
+++ b/nova/api/openstack/extensions.py
@@ -0,0 +1,450 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import imp
+import inspect
+import os
+import sys
+import routes
+import webob.dec
+import webob.exc
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+LOG = logging.getLogger('extensions')
+
+
+FLAGS = flags.FLAGS
+
+
+class ExtensionDescriptor(object):
+ """Base class that defines the contract for extensions.
+
+ Note that you don't have to derive from this class to have a valid
+ extension; it is purely a convenience.
+
+ """
+
+ def get_name(self):
+ """The name of the extension.
+
+ e.g. 'Fox In Socks'
+
+ """
+ raise NotImplementedError()
+
+ def get_alias(self):
+ """The alias for the extension.
+
+ e.g. 'FOXNSOX'
+
+ """
+ raise NotImplementedError()
+
+ def get_description(self):
+ """Friendly description for the extension.
+
+ e.g. 'The Fox In Socks Extension'
+
+ """
+ raise NotImplementedError()
+
+ def get_namespace(self):
+ """The XML namespace for the extension.
+
+ e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'
+
+ """
+ raise NotImplementedError()
+
+ def get_updated(self):
+ """The timestamp when the extension was last updated.
+
+ e.g. '2011-01-22T13:25:27-06:00'
+
+ """
+        # NOTE(justinsb): Not sure what the purpose of this is, vs the XML NS
+ raise NotImplementedError()
+
+ def get_resources(self):
+ """List of extensions.ResourceExtension extension objects.
+
+ Resources define new nouns, and are accessible through URLs.
+
+ """
+ resources = []
+ return resources
+
+ def get_actions(self):
+ """List of extensions.ActionExtension extension objects.
+
+ Actions are verbs callable from the API.
+
+ """
+ actions = []
+ return actions
+
+ def get_response_extensions(self):
+ """List of extensions.ResponseExtension extension objects.
+
+ Response extensions are used to insert information into existing
+ response data.
+
+ """
+ response_exts = []
+ return response_exts
+
+
+class ActionExtensionController(wsgi.Controller):
+
+ def __init__(self, application):
+
+ self.application = application
+ self.action_handlers = {}
+
+ def add_action(self, action_name, handler):
+ self.action_handlers[action_name] = handler
+
+ def action(self, req, id):
+
+ input_dict = self._deserialize(req.body, req.get_content_type())
+ for action_name, handler in self.action_handlers.iteritems():
+ if action_name in input_dict:
+ return handler(input_dict, req, id)
+ # no action handler found (bump to downstream application)
+ res = self.application
+ return res
+
+
+class ResponseExtensionController(wsgi.Controller):
+
+ def __init__(self, application):
+ self.application = application
+ self.handlers = []
+
+ def add_handler(self, handler):
+ self.handlers.append(handler)
+
+ def process(self, req, *args, **kwargs):
+ res = req.get_response(self.application)
+ content_type = req.best_match_content_type()
+ # currently response handlers are un-ordered
+ for handler in self.handlers:
+ res = handler(res)
+ try:
+ body = res.body
+ headers = res.headers
+ except AttributeError:
+ body = self._serialize(res, content_type)
+ headers = {"Content-Type": content_type}
+ res = webob.Response()
+ res.body = body
+ res.headers = headers
+ return res
+
+
+class ExtensionController(wsgi.Controller):
+
+ def __init__(self, extension_manager):
+ self.extension_manager = extension_manager
+
+ def _translate(self, ext):
+ ext_data = {}
+ ext_data['name'] = ext.get_name()
+ ext_data['alias'] = ext.get_alias()
+ ext_data['description'] = ext.get_description()
+ ext_data['namespace'] = ext.get_namespace()
+ ext_data['updated'] = ext.get_updated()
+ ext_data['links'] = [] # TODO(dprince): implement extension links
+ return ext_data
+
+ def index(self, req):
+ extensions = []
+ for _alias, ext in self.extension_manager.extensions.iteritems():
+ extensions.append(self._translate(ext))
+ return dict(extensions=extensions)
+
+ def show(self, req, id):
+ # NOTE(dprince): the extensions alias is used as the 'id' for show
+ ext = self.extension_manager.extensions[id]
+ return self._translate(ext)
+
+ def delete(self, req, id):
+ raise faults.Fault(webob.exc.HTTPNotFound())
+
+ def create(self, req):
+ raise faults.Fault(webob.exc.HTTPNotFound())
+
+
+class ExtensionMiddleware(wsgi.Middleware):
+ """Extensions middleware for WSGI."""
+ @classmethod
+ def factory(cls, global_config, **local_config):
+ """Paste factory."""
+ def _factory(app):
+ return cls(app, **local_config)
+ return _factory
+
+ def _action_ext_controllers(self, application, ext_mgr, mapper):
+ """Return a dict of ActionExtensionController-s by collection."""
+ action_controllers = {}
+ for action in ext_mgr.get_actions():
+            if action.collection not in action_controllers:
+ controller = ActionExtensionController(application)
+ mapper.connect("/%s/:(id)/action.:(format)" %
+ action.collection,
+ action='action',
+ controller=controller,
+ conditions=dict(method=['POST']))
+ mapper.connect("/%s/:(id)/action" % action.collection,
+ action='action',
+ controller=controller,
+ conditions=dict(method=['POST']))
+ action_controllers[action.collection] = controller
+
+ return action_controllers
+
+ def _response_ext_controllers(self, application, ext_mgr, mapper):
+ """Returns a dict of ResponseExtensionController-s by collection."""
+ response_ext_controllers = {}
+ for resp_ext in ext_mgr.get_response_extensions():
+            if resp_ext.key not in response_ext_controllers:
+ controller = ResponseExtensionController(application)
+ mapper.connect(resp_ext.url_route + '.:(format)',
+ action='process',
+ controller=controller,
+ conditions=resp_ext.conditions)
+
+ mapper.connect(resp_ext.url_route,
+ action='process',
+ controller=controller,
+ conditions=resp_ext.conditions)
+ response_ext_controllers[resp_ext.key] = controller
+
+ return response_ext_controllers
+
+ def __init__(self, application, ext_mgr=None):
+
+ if ext_mgr is None:
+ ext_mgr = ExtensionManager(FLAGS.osapi_extensions_path)
+ self.ext_mgr = ext_mgr
+
+ mapper = routes.Mapper()
+
+ # extended resources
+ for resource in ext_mgr.get_resources():
+ LOG.debug(_('Extended resource: %s'),
+ resource.collection)
+ mapper.resource(resource.collection, resource.collection,
+ controller=resource.controller,
+ collection=resource.collection_actions,
+ member=resource.member_actions,
+ parent_resource=resource.parent)
+
+ # extended actions
+ action_controllers = self._action_ext_controllers(application, ext_mgr,
+ mapper)
+ for action in ext_mgr.get_actions():
+ LOG.debug(_('Extended action: %s'), action.action_name)
+ controller = action_controllers[action.collection]
+ controller.add_action(action.action_name, action.handler)
+
+ # extended responses
+ resp_controllers = self._response_ext_controllers(application, ext_mgr,
+ mapper)
+ for response_ext in ext_mgr.get_response_extensions():
+ LOG.debug(_('Extended response: %s'), response_ext.key)
+ controller = resp_controllers[response_ext.key]
+ controller.add_handler(response_ext.handler)
+
+ self._router = routes.middleware.RoutesMiddleware(self._dispatch,
+ mapper)
+
+ super(ExtensionMiddleware, self).__init__(application)
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ """Route the incoming request with router."""
+ req.environ['extended.app'] = self.application
+ return self._router
+
+ @staticmethod
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def _dispatch(req):
+ """Dispatch the request.
+
+ Returns the routed WSGI app's response or defers to the extended
+ application.
+
+ """
+ match = req.environ['wsgiorg.routing_args'][1]
+ if not match:
+ return req.environ['extended.app']
+ app = match['controller']
+ return app
+
+
+class ExtensionManager(object):
+ """Load extensions from the configured extension path.
+
+ See nova/tests/api/openstack/extensions/foxinsocks/extension.py for an
+ example extension implementation.
+
+ """
+
+ def __init__(self, path):
+ LOG.audit(_('Initializing extension manager.'))
+
+ self.path = path
+ self.extensions = {}
+ self._load_all_extensions()
+
+ def get_resources(self):
+ """Returns a list of ResourceExtension objects."""
+ resources = []
+ resources.append(ResourceExtension('extensions',
+ ExtensionController(self)))
+ for alias, ext in self.extensions.iteritems():
+ try:
+ resources.extend(ext.get_resources())
+ except AttributeError:
+                # NOTE(dprince): Extensions aren't required to have resource
+                # extensions
+ pass
+ return resources
+
+ def get_actions(self):
+ """Returns a list of ActionExtension objects."""
+ actions = []
+ for alias, ext in self.extensions.iteritems():
+ try:
+ actions.extend(ext.get_actions())
+ except AttributeError:
+                # NOTE(dprince): Extensions aren't required to have action
+                # extensions
+ pass
+ return actions
+
+ def get_response_extensions(self):
+ """Returns a list of ResponseExtension objects."""
+ response_exts = []
+ for alias, ext in self.extensions.iteritems():
+ try:
+ response_exts.extend(ext.get_response_extensions())
+ except AttributeError:
+                # NOTE(dprince): Extensions aren't required to have response
+                # extensions
+ pass
+ return response_exts
+
+ def _check_extension(self, extension):
+ """Checks for required methods in extension objects."""
+ try:
+ LOG.debug(_('Ext name: %s'), extension.get_name())
+ LOG.debug(_('Ext alias: %s'), extension.get_alias())
+ LOG.debug(_('Ext description: %s'), extension.get_description())
+ LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
+ LOG.debug(_('Ext updated: %s'), extension.get_updated())
+ except AttributeError as ex:
+ LOG.exception(_("Exception loading extension: %s"), unicode(ex))
+
+ def _load_all_extensions(self):
+ """Load extensions from the configured path.
+
+ Load extensions from the configured path. The extension name is
+ constructed from the module_name. If your extension module was named
+ widgets.py the extension class within that module should be
+ 'Widgets'.
+
+ In addition, extensions are loaded from the 'contrib' directory.
+
+ See nova/tests/api/openstack/extensions/foxinsocks.py for an example
+ extension implementation.
+
+ """
+ if os.path.exists(self.path):
+ self._load_all_extensions_from_path(self.path)
+
+ contrib_path = os.path.join(os.path.dirname(__file__), "contrib")
+ if os.path.exists(contrib_path):
+ self._load_all_extensions_from_path(contrib_path)
+
+ def _load_all_extensions_from_path(self, path):
+ for f in os.listdir(path):
+ LOG.audit(_('Loading extension file: %s'), f)
+ mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
+ ext_path = os.path.join(path, f)
+ if file_ext.lower() == '.py' and not mod_name.startswith('_'):
+ mod = imp.load_source(mod_name, ext_path)
+ ext_name = mod_name[0].upper() + mod_name[1:]
+ new_ext_class = getattr(mod, ext_name, None)
+ if not new_ext_class:
+ LOG.warn(_('Did not find expected name '
+ '"%(ext_name)s" in %(file)s'),
+ {'ext_name': ext_name,
+ 'file': ext_path})
+ continue
+ new_ext = new_ext_class()
+ self._check_extension(new_ext)
+ self._add_extension(new_ext)
+
+ def _add_extension(self, ext):
+ alias = ext.get_alias()
+ LOG.audit(_('Loaded extension: %s'), alias)
+
+ self._check_extension(ext)
+
+ if alias in self.extensions:
+ raise exception.Error("Found duplicate extension: %s" % alias)
+ self.extensions[alias] = ext
+
+
+class ResponseExtension(object):
+ """Add data to responses from core nova OpenStack API controllers."""
+
+ def __init__(self, method, url_route, handler):
+ self.url_route = url_route
+ self.handler = handler
+ self.conditions = dict(method=[method])
+ self.key = "%s-%s" % (method, url_route)
+
+
+class ActionExtension(object):
+ """Add custom actions to core nova OpenStack API controllers."""
+
+ def __init__(self, collection, action_name, handler):
+ self.collection = collection
+ self.action_name = action_name
+ self.handler = handler
+
+
+class ResourceExtension(object):
+ """Add top level resources to the OpenStack API in nova."""
+
+ def __init__(self, collection, controller, parent=None,
+ collection_actions={}, member_actions={}):
+ self.collection = collection
+ self.controller = controller
+ self.parent = parent
+ self.collection_actions = collection_actions
+ self.member_actions = member_actions
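
Per the _load_all_extensions() docstring, a module dropped into FLAGS.osapi_extensions_path must expose a class named after the capitalized module name and answer the five descriptor methods checked by _check_extension(). A hypothetical widgets.py, purely as a sketch (the alias and namespace are made up):

    class Widgets(object):
        """Do-nothing example extension; the file would be named widgets.py."""

        def get_name(self):
            return "Widgets"

        def get_alias(self):
            return "WIDGETS"

        def get_description(self):
            return "Demonstration extension that adds no behaviour"

        def get_namespace(self):
            return "http://example.com/ext/widgets/api/v1.1"

        def get_updated(self):
            return "2011-04-05T00:00:00+00:00"

        def get_resources(self):
            # No extra resources; see the Volumes extension above for a real one.
            return []
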
diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 224a7ef0b..940bd8771 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -42,7 +42,7 @@ class Fault(webob.exc.HTTPException):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
@@ -57,6 +57,47 @@ class Fault(webob.exc.HTTPException):
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
- serializer = wsgi.Serializer(req.environ, metadata)
- self.wrapped_exc.body = serializer.to_content_type(fault_data)
+ serializer = wsgi.Serializer(metadata)
+ content_type = req.best_match_content_type()
+ self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
+ self.wrapped_exc.content_type = content_type
+ return self.wrapped_exc
+
+
+class OverLimitFault(webob.exc.HTTPException):
+ """
+ Rate-limited request response.
+ """
+
+ _serialization_metadata = {
+ "application/xml": {
+ "attributes": {
+ "overLimitFault": "code",
+ },
+ },
+ }
+
+ def __init__(self, message, details, retry_time):
+ """
+ Initialize new `OverLimitFault` with relevant information.
+ """
+ self.wrapped_exc = webob.exc.HTTPForbidden()
+ self.content = {
+ "overLimitFault": {
+ "code": self.wrapped_exc.status_int,
+ "message": message,
+ "details": details,
+ },
+ }
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, request):
+ """
+ Return the wrapped exception with a serialized body conforming to our
+ error format.
+ """
+ serializer = wsgi.Serializer(self._serialization_metadata)
+ content_type = request.best_match_content_type()
+ content = serializer.serialize(self.content, content_type)
+ self.wrapped_exc.body = content
return self.wrapped_exc
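
A sketch of how the new OverLimitFault responds once the rate limiter (nova/api/openstack/limits.py, below) raises it, assuming the nova tree is importable; the message and details strings are illustrative:

    import time

    import webob

    from nova.api.openstack import faults

    fault = faults.OverLimitFault("This request was rate-limited.",
                                  "Only 10 POST request(s) can be made to * "
                                  "every minute.", time.time() + 60)

    req = webob.Request.blank('/v1.0/servers',
                              headers={'Accept': 'application/json'})
    resp = req.get_response(fault)
    print(resp.status)   # 403 Forbidden
    print(resp.body)     # {"overLimitFault": {"code": 403, "message": ..., ...}}
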
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index f620d4107..5b99b5a6f 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -15,13 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from webob import exc
+import webob
-from nova.api.openstack import faults
-from nova.api.openstack import common
-from nova.compute import instance_types
+from nova import db
+from nova import exception
from nova import wsgi
-import nova.api.openstack
+from nova.api.openstack import views
class Controller(wsgi.Controller):
@@ -30,28 +29,50 @@ class Controller(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
- "flavor": ["id", "name", "ram", "disk"]}}}
+ "flavor": ["id", "name", "ram", "disk"],
+ "link": ["rel", "type", "href"],
+ }
+ }
+ }
def index(self, req):
"""Return all flavors in brief."""
- return dict(flavors=[dict(id=flavor['id'], name=flavor['name'])
- for flavor in self.detail(req)['flavors']])
+ items = self._get_flavors(req, is_detail=False)
+ return dict(flavors=items)
def detail(self, req):
"""Return all flavors in detail."""
- items = [self.show(req, id)['flavor'] for id in self._all_ids()]
- items = common.limited(items, req)
+ items = self._get_flavors(req, is_detail=True)
return dict(flavors=items)
+ def _get_flavors(self, req, is_detail=True):
+ """Helper function that returns a list of flavor dicts."""
+ ctxt = req.environ['nova.context']
+ flavors = db.api.instance_type_get_all(ctxt)
+ builder = self._get_view_builder(req)
+ items = [builder.build(flavor, is_detail=is_detail)
+ for flavor in flavors.values()]
+ return items
+
def show(self, req, id):
"""Return data about the given flavor id."""
- for name, val in instance_types.INSTANCE_TYPES.iteritems():
- if val['flavorid'] == int(id):
- item = dict(ram=val['memory_mb'], disk=val['local_gb'],
- id=val['flavorid'], name=name)
- return dict(flavor=item)
- raise faults.Fault(exc.HTTPNotFound())
-
- def _all_ids(self):
- """Return the list of all flavorids."""
- return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()]
+ try:
+ ctxt = req.environ['nova.context']
+ flavor = db.api.instance_type_get_by_flavor_id(ctxt, id)
+ except exception.NotFound:
+ return webob.exc.HTTPNotFound()
+
+ builder = self._get_view_builder(req)
+ values = builder.build(flavor, is_detail=True)
+ return dict(flavor=values)
+
+
+class ControllerV10(Controller):
+ def _get_view_builder(self, req):
+ return views.flavors.ViewBuilder()
+
+
+class ControllerV11(Controller):
+ def _get_view_builder(self, req):
+ base_url = req.application_url
+ return views.flavors.ViewBuilderV11(base_url)
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
new file mode 100644
index 000000000..c9d6ac532
--- /dev/null
+++ b/nova/api/openstack/image_metadata.py
@@ -0,0 +1,93 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import flags
+from nova import utils
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+FLAGS = flags.FLAGS
+
+
+class Controller(wsgi.Controller):
+    """The image metadata API controller for the OpenStack API."""
+
+ def __init__(self):
+ self.image_service = utils.import_object(FLAGS.image_service)
+ super(Controller, self).__init__()
+
+ def _get_metadata(self, context, image_id, image=None):
+ if not image:
+ image = self.image_service.show(context, image_id)
+ metadata = image.get('properties', {})
+ return metadata
+
+ def index(self, req, image_id):
+ """Returns the list of metadata for a given instance"""
+ context = req.environ['nova.context']
+ metadata = self._get_metadata(context, image_id)
+ return dict(metadata=metadata)
+
+ def show(self, req, image_id, id):
+ context = req.environ['nova.context']
+ metadata = self._get_metadata(context, image_id)
+ if id in metadata:
+ return {id: metadata[id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def create(self, req, image_id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+ img = self.image_service.show(context, image_id)
+ metadata = self._get_metadata(context, image_id, img)
+ if 'metadata' in body:
+ for key, value in body['metadata'].iteritems():
+ metadata[key] = value
+ img['properties'] = metadata
+ self.image_service.update(context, image_id, img, None)
+ return dict(metadata=metadata)
+
+ def update(self, req, image_id, id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+ if not id in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ img = self.image_service.show(context, image_id)
+ metadata = self._get_metadata(context, image_id, img)
+ metadata[id] = body[id]
+ img['properties'] = metadata
+ self.image_service.update(context, image_id, img, None)
+
+ return req.body
+
+ def delete(self, req, image_id, id):
+ context = req.environ['nova.context']
+ img = self.image_service.show(context, image_id)
+ metadata = self._get_metadata(context, image_id)
+ if not id in metadata:
+ return faults.Fault(exc.HTTPNotFound())
+ metadata.pop(id)
+ img['properties'] = metadata
+ self.image_service.update(context, image_id, img, None)
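
The metadata controller above accepts a whole dictionary on create and exactly one key, matching the URL, on update. Sketches of both bodies (the key names are illustrative):

    import json

    # POST /images/<image_id>/meta -- merged into the image's properties
    create_body = {"metadata": {"distro": "ubuntu", "kernel_id": "5"}}

    # PUT /images/<image_id>/meta/distro -- must contain only the key from
    # the URL, otherwise the controller returns 400
    update_body = {"distro": "ubuntu 10.10"}

    print(json.dumps(create_body))
    print(json.dumps(update_body))
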
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index cf85a496f..e77100d7b 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,152 +13,143 @@
# License for the specific language governing permissions and limitations
# under the License.
-from webob import exc
+import datetime
+
+import webob.exc
from nova import compute
+from nova import exception
from nova import flags
+from nova import log
from nova import utils
from nova import wsgi
-import nova.api.openstack
from nova.api.openstack import common
from nova.api.openstack import faults
-import nova.image.service
+from nova.api.openstack.views import images as images_view
+LOG = log.getLogger('nova.api.openstack.images')
FLAGS = flags.FLAGS
-def _translate_keys(item):
- """
- Maps key names to Rackspace-like attributes for return
- also pares down attributes to those we want
- item is a dict
-
- Note: should be removed when the set of keys expected by the api
- and the set of keys returned by the image service are equivalent
-
- """
- # TODO(tr3buchet): this map is specific to s3 object store,
- # replace with a list of keys for _filter_keys later
- mapped_keys = {'status': 'imageState',
- 'id': 'imageId',
- 'name': 'imageLocation'}
-
- mapped_item = {}
- # TODO(tr3buchet):
- # this chunk of code works with s3 and the local image service/glance
- # when we switch to glance/local image service it can be replaced with
- # a call to _filter_keys, and mapped_keys can be changed to a list
- try:
- for k, v in mapped_keys.iteritems():
- # map s3 fields
- mapped_item[k] = item[v]
- except KeyError:
- # return only the fields api expects
- mapped_item = _filter_keys(item, mapped_keys.keys())
-
- return mapped_item
-
-
-def _translate_status(item):
- """
- Translates status of image to match current Rackspace api bindings
- item is a dict
-
- Note: should be removed when the set of statuses expected by the api
- and the set of statuses returned by the image service are equivalent
-
- """
- status_mapping = {
- 'pending': 'queued',
- 'decrypting': 'preparing',
- 'untarring': 'saving',
- 'available': 'active'}
- try:
- item['status'] = status_mapping[item['status']]
- except KeyError:
- # TODO(sirp): Performing translation of status (if necessary) here for
- # now. Perhaps this should really be done in EC2 API and
- # S3ImageService
- pass
-
- return item
-
-
-def _filter_keys(item, keys):
- """
- Filters all model attributes except for keys
- item is a dict
-
- """
- return dict((k, v) for k, v in item.iteritems() if k in keys)
-
-
-def _convert_image_id_to_hash(image):
- if 'imageId' in image:
- # Convert EC2-style ID (i-blah) to Rackspace-style (int)
- image_id = abs(hash(image['imageId']))
- image['imageId'] = image_id
- image['id'] = image_id
-
-
class Controller(wsgi.Controller):
+ """Base `wsgi.Controller` for retrieving/displaying images."""
_serialization_metadata = {
'application/xml': {
"attributes": {
"image": ["id", "name", "updated", "created", "status",
- "serverId", "progress"]}}}
+ "serverId", "progress"],
+ "link": ["rel", "type", "href"],
+ },
+ },
+ }
- def __init__(self):
- self._service = utils.import_object(FLAGS.image_service)
+ def __init__(self, image_service=None, compute_service=None):
+ """Initialize new `ImageController`.
+
+ :param compute_service: `nova.compute.api:API`
+ :param image_service: `nova.image.service:BaseImageService`
+ """
+ _default_service = utils.import_object(flags.FLAGS.image_service)
+
+ self._compute_service = compute_service or compute.API()
+ self._image_service = image_service or _default_service
def index(self, req):
- """Return all public images in brief"""
- items = self._service.index(req.environ['nova.context'])
- items = common.limited(items, req)
- items = [_filter_keys(item, ('id', 'name')) for item in items]
- return dict(images=items)
+ """Return an index listing of images available to the request.
+
+ :param req: `wsgi.Request` object
+ """
+ context = req.environ['nova.context']
+ images = self._image_service.index(context)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=False) for image in images])
def detail(self, req):
- """Return all public images in detail"""
- try:
- items = self._service.detail(req.environ['nova.context'])
- except NotImplementedError:
- items = self._service.index(req.environ['nova.context'])
- for image in items:
- _convert_image_id_to_hash(image)
+ """Return a detailed index listing of images available to the request.
- items = common.limited(items, req)
- items = [_translate_keys(item) for item in items]
- items = [_translate_status(item) for item in items]
- return dict(images=items)
+ :param req: `wsgi.Request` object.
+ """
+ context = req.environ['nova.context']
+ images = self._image_service.detail(context)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=True) for image in images])
def show(self, req, id):
- """Return data about the given image id"""
- image_id = common.get_image_id_from_image_hash(self._service,
- req.environ['nova.context'], id)
+ """Return detailed information about a specific image.
- image = self._service.show(req.environ['nova.context'], image_id)
- _convert_image_id_to_hash(image)
- return dict(image=image)
+ :param req: `wsgi.Request` object
+ :param id: Image identifier (integer)
+ """
+ context = req.environ['nova.context']
+
+ try:
+ image_id = int(id)
+ except ValueError:
+ explanation = _("Image not found.")
+ raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation))
+
+ try:
+ image = self._image_service.show(context, image_id)
+ except exception.NotFound:
+ explanation = _("Image '%d' not found.") % (image_id)
+ raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation))
+
+ return dict(image=self.get_builder(req).build(image, detail=True))
def delete(self, req, id):
- # Only public images are supported for now.
- raise faults.Fault(exc.HTTPNotFound())
+ """Delete an image, if allowed.
+
+ :param req: `wsgi.Request` object
+ :param id: Image identifier (integer)
+ """
+ image_id = id
+ context = req.environ['nova.context']
+ self._image_service.delete(context, image_id)
+ return webob.exc.HTTPNoContent()
def create(self, req):
+ """Snapshot a server instance and save the image.
+
+ :param req: `wsgi.Request` object
+ """
context = req.environ['nova.context']
- env = self._deserialize(req.body, req)
- instance_id = env["image"]["serverId"]
- name = env["image"]["name"]
+ content_type = req.get_content_type()
+ image = self._deserialize(req.body, content_type)
+
+ if not image:
+ raise webob.exc.HTTPBadRequest()
+
+ try:
+ server_id = image["image"]["serverId"]
+ image_name = image["image"]["name"]
+ except KeyError:
+ raise webob.exc.HTTPBadRequest()
+
+ image = self._compute_service.snapshot(context, server_id, image_name)
+ return self.get_builder(req).build(image, detail=True)
+
+ def get_builder(self, request):
+ """Indicates that you must use a Controller subclass."""
+ raise NotImplementedError
+
+
+class ControllerV10(Controller):
+ """Version 1.0 specific controller logic."""
+
+ def get_builder(self, request):
+ """Property to get the ViewBuilder class we need to use."""
+ base_url = request.application_url
+ return images_view.ViewBuilderV10(base_url)
- image_meta = compute.API().snapshot(
- context, instance_id, name)
- return dict(image=image_meta)
+class ControllerV11(Controller):
+ """Version 1.1 specific controller logic."""
- def update(self, req, id):
- # Users may not modify public images, and that's all that
- # we support for now.
- raise faults.Fault(exc.HTTPNotFound())
+ def get_builder(self, request):
+ """Property to get the ViewBuilder class we need to use."""
+ base_url = request.application_url
+ return images_view.ViewBuilderV11(base_url)
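
Controller.create() above snapshots a running server; both keys in the body are required or the request is rejected with 400. A sketch of the body (the server id and image name are made up):

    import json

    # POST /images -- consumed by Controller.create() via the compute API
    snapshot_body = {
        "image": {
            "serverId": 12,
            "name": "backup-before-upgrade",
        },
    }
    print(json.dumps(snapshot_body))
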
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
new file mode 100644
index 000000000..efc7d193d
--- /dev/null
+++ b/nova/api/openstack/limits.py
@@ -0,0 +1,358 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module dedicated functions/classes dealing with rate limiting requests.
+"""
+
+import copy
+import httplib
+import json
+import math
+import re
+import time
+import urllib
+import webob.exc
+
+from collections import defaultdict
+
+from webob.dec import wsgify
+
+from nova import wsgi
+from nova.api.openstack import faults
+from nova.wsgi import Controller
+from nova.wsgi import Middleware
+
+
+# Convenience constants for the limits dictionary passed to Limiter().
+PER_SECOND = 1
+PER_MINUTE = 60
+PER_HOUR = 60 * 60
+PER_DAY = 60 * 60 * 24
+
+
+class LimitsController(Controller):
+ """
+ Controller for accessing limits in the OpenStack API.
+ """
+
+ _serialization_metadata = {
+ "application/xml": {
+ "attributes": {
+ "limit": ["verb", "URI", "regex", "value", "unit",
+ "resetTime", "remaining", "name"],
+ },
+ "plurals": {
+ "rate": "limit",
+ },
+ },
+ }
+
+ def index(self, req):
+ """
+ Return all global and rate limit information.
+ """
+ abs_limits = {}
+ rate_limits = req.environ.get("nova.limits", [])
+
+ return {
+ "limits": {
+ "rate": rate_limits,
+ "absolute": abs_limits,
+ },
+ }
+
+
+class Limit(object):
+ """
+    Stores information about a limit for HTTP requests.
+ """
+
+ UNITS = {
+ 1: "SECOND",
+ 60: "MINUTE",
+ 60 * 60: "HOUR",
+ 60 * 60 * 24: "DAY",
+ }
+
+ def __init__(self, verb, uri, regex, value, unit):
+ """
+ Initialize a new `Limit`.
+
+ @param verb: HTTP verb (POST, PUT, etc.)
+ @param uri: Human-readable URI
+ @param regex: Regular expression format for this limit
+ @param value: Integer number of requests which can be made
+ @param unit: Unit of measure for the value parameter
+ """
+ self.verb = verb
+ self.uri = uri
+ self.regex = regex
+ self.value = int(value)
+ self.unit = unit
+ self.unit_string = self.display_unit().lower()
+ self.remaining = int(value)
+
+ if value <= 0:
+ raise ValueError("Limit value must be > 0")
+
+ self.last_request = None
+ self.next_request = None
+
+ self.water_level = 0
+ self.capacity = self.unit
+ self.request_value = float(self.capacity) / float(self.value)
+        self.error_message = _("Only %(value)s %(verb)s request(s) can be "
+           "made to %(uri)s every %(unit_string)s.") % self.__dict__
+
+ def __call__(self, verb, url):
+ """
+ Represents a call to this limit from a relevant request.
+
+ @param verb: string http verb (POST, GET, etc.)
+ @param url: string URL
+ """
+ if self.verb != verb or not re.match(self.regex, url):
+ return
+
+ now = self._get_time()
+
+ if self.last_request is None:
+ self.last_request = now
+
+ leak_value = now - self.last_request
+
+ self.water_level -= leak_value
+ self.water_level = max(self.water_level, 0)
+ self.water_level += self.request_value
+
+ difference = self.water_level - self.capacity
+
+ self.last_request = now
+
+ if difference > 0:
+ self.water_level -= self.request_value
+ self.next_request = now + difference
+ return difference
+
+ cap = self.capacity
+ water = self.water_level
+ val = self.value
+
+ self.remaining = math.floor(((cap - water) / cap) * val)
+ self.next_request = now
+
+ def _get_time(self):
+ """Retrieve the current time. Broken out for testability."""
+ return time.time()
+
+ def display_unit(self):
+ """Display the string name of the unit."""
+ return self.UNITS.get(self.unit, "UNKNOWN")
+
+ def display(self):
+ """Return a useful representation of this class."""
+ return {
+ "verb": self.verb,
+ "URI": self.uri,
+ "regex": self.regex,
+ "value": self.value,
+ "remaining": int(self.remaining),
+ "unit": self.display_unit(),
+ "resetTime": int(self.next_request or self._get_time()),
+ }
+
+# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
+# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
+
+DEFAULT_LIMITS = [
+ Limit("POST", "*", ".*", 10, PER_MINUTE),
+ Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
+ Limit("PUT", "*", ".*", 10, PER_MINUTE),
+ Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
+ Limit("DELETE", "*", ".*", 100, PER_MINUTE),
+]
+
+
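
The Limit class above is a leaky bucket: each accepted request adds unit/value to the water level and the bucket drains in real time, so a 10-per-minute limit pushes the eleventh immediate request back by roughly six seconds. A sketch, assuming the nova tree is importable:

    from nova.api.openstack.limits import Limit, PER_MINUTE

    limit = Limit("POST", "*", ".*", 10, PER_MINUTE)

    for _i in range(10):
        assert limit("POST", "/servers") is None     # within the allowance

    delay = limit("POST", "/servers")
    print(delay)                  # ~6.0 seconds until the next slot opens
    print(int(limit.remaining))   # 0 requests left in the current window
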
+class RateLimitingMiddleware(Middleware):
+ """
+ Rate-limits requests passing through this middleware. All limit information
+ is stored in memory for this implementation.
+ """
+
+ def __init__(self, application, limits=None):
+ """
+ Initialize new `RateLimitingMiddleware`, which wraps the given WSGI
+ application and sets up the given limits.
+
+ @param application: WSGI application to wrap
+ @param limits: List of dictionaries describing limits
+ """
+ Middleware.__init__(self, application)
+ self._limiter = Limiter(limits or DEFAULT_LIMITS)
+
+ @wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ """
+ Represents a single call through this middleware. We should record the
+ request if we have a limit relevant to it. If no limit is relevant to
+ the request, ignore it.
+
+ If the request should be rate limited, return a fault telling the user
+ they are over the limit and need to retry later.
+ """
+ verb = req.method
+ url = req.url
+ context = req.environ.get("nova.context")
+
+ if context:
+ username = context.user_id
+ else:
+ username = None
+
+ delay, error = self._limiter.check_for_delay(verb, url, username)
+
+ if delay:
+ msg = _("This request was rate-limited.")
+ retry = time.time() + delay
+ return faults.OverLimitFault(msg, error, retry)
+
+ req.environ["nova.limits"] = self._limiter.get_limits(username)
+
+ return self.application
+
+
+class Limiter(object):
+ """
+ Rate-limit checking class which handles limits in memory.
+ """
+
+ def __init__(self, limits):
+ """
+ Initialize the new `Limiter`.
+
+ @param limits: List of `Limit` objects
+ """
+ self.limits = copy.deepcopy(limits)
+ self.levels = defaultdict(lambda: copy.deepcopy(limits))
+
+ def get_limits(self, username=None):
+ """
+ Return the limits for a given user.
+ """
+ return [limit.display() for limit in self.levels[username]]
+
+ def check_for_delay(self, verb, url, username=None):
+ """
+ Check the given verb/url/username triplet against the known limits.
+
+ @return: Tuple of delay (in seconds) and error message (or None, None)
+ """
+ delays = []
+
+ for limit in self.levels[username]:
+ delay = limit(verb, url)
+ if delay:
+ delays.append((delay, limit.error_message))
+
+ if delays:
+ delays.sort()
+ return delays[0]
+
+ return None, None
+
+
+class WsgiLimiter(object):
+ """
+ Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.
+
+ To use:
+ POST /<username> with JSON data such as:
+ {
+ "verb" : GET,
+ "path" : "/servers"
+ }
+
+ and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
+ header containing the number of seconds to wait before the action would
+ succeed.
+ """
+
+ def __init__(self, limits=None):
+ """
+ Initialize the new `WsgiLimiter`.
+
+ @param limits: List of `Limit` objects
+ """
+ self._limiter = Limiter(limits or DEFAULT_LIMITS)
+
+ @wsgify(RequestClass=wsgi.Request)
+ def __call__(self, request):
+ """
+ Handles a call to this application. Returns 204 if the request is
+ acceptable to the limiter, else a 403 is returned with a relevant
+ header indicating when the request *will* succeed.
+ """
+ if request.method != "POST":
+ raise webob.exc.HTTPMethodNotAllowed()
+
+ try:
+ info = dict(json.loads(request.body))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest()
+
+ username = request.path_info_pop()
+ verb = info.get("verb")
+ path = info.get("path")
+
+ delay, error = self._limiter.check_for_delay(verb, path, username)
+
+ if delay:
+ headers = {"X-Wait-Seconds": "%.2f" % delay}
+ return webob.exc.HTTPForbidden(headers=headers, explanation=error)
+ else:
+ return webob.exc.HTTPNoContent()
+
+
+class WsgiLimiterProxy(object):
+ """
+ Rate-limit requests based on answers from a remote source.
+ """
+
+ def __init__(self, limiter_address):
+ """
+ Initialize the new `WsgiLimiterProxy`.
+
+ @param limiter_address: IP/port combination of where to request limit
+ """
+ self.limiter_address = limiter_address
+
+ def check_for_delay(self, verb, path, username=None):
+ body = json.dumps({"verb": verb, "path": path})
+ headers = {"Content-Type": "application/json"}
+
+ conn = httplib.HTTPConnection(self.limiter_address)
+
+ if username:
+ conn.request("POST", "/%s" % (username), body, headers)
+ else:
+ conn.request("POST", "/", body, headers)
+
+ resp = conn.getresponse()
+
+ if 200 <= resp.status < 300:
+ return None, None
+
+ return resp.getheader("X-Wait-Seconds"), resp.read() or None
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index cbb4b897e..88ffc3246 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -57,7 +57,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
self.limiter = WSGIAppProxy(service_host)
super(RateLimitingMiddleware, self).__init__(application)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Rate limit the request.
@@ -183,7 +183,7 @@ class WSGIApp(object):
"""Create the WSGI application using the given Limiter instance."""
self.limiter = limiter
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
parts = req.path_info.split('/')
# format: /limiter/<username>/<urlencoded action>
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
new file mode 100644
index 000000000..45bbac99d
--- /dev/null
+++ b/nova/api/openstack/server_metadata.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import compute
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+class Controller(wsgi.Controller):
+ """ The server metadata API controller for the Openstack API """
+
+ def __init__(self):
+ self.compute_api = compute.API()
+ super(Controller, self).__init__()
+
+ def _get_metadata(self, context, server_id):
+ metadata = self.compute_api.get_instance_metadata(context, server_id)
+ meta_dict = {}
+ for key, value in metadata.iteritems():
+ meta_dict[key] = value
+ return dict(metadata=meta_dict)
+
+ def index(self, req, server_id):
+ """ Returns the list of metadata for a given instance """
+ context = req.environ['nova.context']
+ return self._get_metadata(context, server_id)
+
+ def create(self, req, server_id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+ self.compute_api.update_or_create_instance_metadata(context,
+ server_id,
+ body['metadata'])
+ return req.body
+
+ def update(self, req, server_id, id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+ if not id in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ self.compute_api.update_or_create_instance_metadata(context,
+ server_id,
+ body)
+ return req.body
+
+ def show(self, req, server_id, id):
+ """ Return a single metadata item """
+ context = req.environ['nova.context']
+ data = self._get_metadata(context, server_id)
+ if id in data['metadata']:
+ return {id: data['metadata'][id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def delete(self, req, server_id, id):
+ """ Deletes an existing metadata """
+ context = req.environ['nova.context']
+ self.compute_api.delete_instance_metadata(context, server_id, id)
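
The metadata controller above works with plain dictionaries keyed by metadata name; the request and response shapes it expects are sketched below (the /servers/<id>/metadata routing is inferred from the method signatures, it is not shown in this hunk):

# POST /servers/1/metadata -- create() expects and echoes a body like:
create_body = {"metadata": {"purpose": "web", "tier": "frontend"}}

# PUT /servers/1/metadata/purpose -- update() requires the URI key in the body
# and rejects bodies with more than one item:
update_body = {"purpose": "database"}

# GET /servers/1/metadata -- index() returns every item for the server:
index_response = {"metadata": {"purpose": "database", "tier": "frontend"}}

# GET /servers/1/metadata/purpose -- show() returns a single item:
show_response = {"purpose": "database"}
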
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 73c7bfe17..6704a68ae 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -13,120 +13,97 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
+import base64
+import hashlib
import traceback
from webob import exc
+from xml.dom import minidom
from nova import compute
+from nova import context
from nova import exception
from nova import flags
from nova import log as logging
-from nova import wsgi
+from nova import quota
from nova import utils
+from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
+import nova.api.openstack.views.addresses
+import nova.api.openstack.views.flavors
+import nova.api.openstack.views.servers
from nova.auth import manager as auth_manager
from nova.compute import instance_types
from nova.compute import power_state
import nova.api.openstack
+from nova.scheduler import api as scheduler_api
LOG = logging.getLogger('server')
-
-
FLAGS = flags.FLAGS
-def _translate_detail_keys(inst):
- """ Coerces into dictionary format, mapping everything to Rackspace-like
- attributes for return"""
- power_mapping = {
- None: 'build',
- power_state.NOSTATE: 'build',
- power_state.RUNNING: 'active',
- power_state.BLOCKED: 'active',
- power_state.SUSPENDED: 'suspended',
- power_state.PAUSED: 'paused',
- power_state.SHUTDOWN: 'active',
- power_state.SHUTOFF: 'active',
- power_state.CRASHED: 'error'}
- inst_dict = {}
-
- mapped_keys = dict(status='state', imageId='image_id',
- flavorId='instance_type', name='display_name', id='id')
-
- for k, v in mapped_keys.iteritems():
- inst_dict[k] = inst[v]
-
- inst_dict['status'] = power_mapping[inst_dict['status']]
- inst_dict['addresses'] = dict(public=[], private=[])
-
- # grab single private fixed ip
- private_ips = utils.get_from_path(inst, 'fixed_ip/address')
- inst_dict['addresses']['private'] = private_ips
-
- # grab all public floating ips
- public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
- inst_dict['addresses']['public'] = public_ips
-
- inst_dict['hostId'] = ''
-
- # Return the metadata as a dictionary
- metadata = {}
- for item in inst['metadata']:
- metadata[item['key']] = item['value']
- inst_dict['metadata'] = metadata
-
- return dict(server=inst_dict)
-
-
-def _translate_keys(inst):
- """ Coerces into dictionary format, excluding all model attributes
- save for id and name """
- return dict(server=dict(id=inst['id'], name=inst['display_name']))
-
-
class Controller(wsgi.Controller):
""" The Server API controller for the OpenStack API """
_serialization_metadata = {
- 'application/xml': {
+ "application/xml": {
"attributes": {
"server": ["id", "imageId", "name", "flavorId", "hostId",
- "status", "progress"]}}}
+ "status", "progress", "adminPass", "flavorRef",
+ "imageRef"],
+ "link": ["rel", "type", "href"],
+ },
+ },
+ }
def __init__(self):
self.compute_api = compute.API()
self._image_service = utils.import_object(FLAGS.image_service)
super(Controller, self).__init__()
+ def ips(self, req, id):
+ try:
+ instance = self.compute_api.get(req.environ['nova.context'], id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ builder = self._get_addresses_view_builder(req)
+ return builder.build(instance)
+
def index(self, req):
""" Returns a list of server names and ids for a given user """
- return self._items(req, entity_maker=_translate_keys)
+ return self._items(req, is_detail=False)
def detail(self, req):
""" Returns a list of server details for a given user """
- return self._items(req, entity_maker=_translate_detail_keys)
+ return self._items(req, is_detail=True)
- def _items(self, req, entity_maker):
+ def _items(self, req, is_detail):
"""Returns a list of servers for a given user.
- entity_maker - either _translate_detail_keys or _translate_keys
+ builder - the response model builder
"""
instance_list = self.compute_api.get_all(req.environ['nova.context'])
- limited_list = common.limited(instance_list, req)
- res = [entity_maker(inst)['server'] for inst in limited_list]
- return dict(servers=res)
+ limited_list = self._limit_items(instance_list, req)
+ builder = self._get_view_builder(req)
+ servers = [builder.build(inst, is_detail)['server']
+ for inst in limited_list]
+ return dict(servers=servers)
+ @scheduler_api.redirect_handler
def show(self, req, id):
""" Returns server details by server id """
try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
- return _translate_detail_keys(instance)
+ instance = self.compute_api.routing_get(
+ req.environ['nova.context'], id)
+ builder = self._get_view_builder(req)
+ return builder.build(instance, is_detail=True)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
+ @scheduler_api.redirect_handler
def delete(self, req, id):
""" Destroys a server """
try:
@@ -135,39 +112,25 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, req, image_id):
- """
- Machine images are associated with Kernels and Ramdisk images via
- metadata stored in Glance as 'image_properties'
- """
- def lookup(param):
- _image_id = image_id
- try:
- return image['properties'][param]
- except KeyError:
- LOG.debug(
- _("%(param)s property not found for image %(_image_id)s") %
- locals())
- return None
-
- image_id = str(image_id)
- image = self._image_service.show(req.environ['nova.context'], image_id)
- return lookup('kernel_id'), lookup('ramdisk_id')
-
def create(self, req):
""" Creates a new server for a given user """
- env = self._deserialize(req.body, req)
+ env = self._deserialize_create(req)
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
context = req.environ['nova.context']
+
+ key_name = None
+ key_data = None
key_pairs = auth_manager.AuthManager.get_key_pairs(context)
- if not key_pairs:
- raise exception.NotFound(_("No keypairs defined"))
- key_pair = key_pairs[0]
+ if key_pairs:
+ key_pair = key_pairs[0]
+ key_name = key_pair['name']
+ key_data = key_pair['public_key']
+ requested_image_id = self._image_id_from_req_data(env)
image_id = common.get_image_id_from_image_hash(self._image_service,
- context, env['server']['imageId'])
+ context, requested_image_id)
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
req, image_id)
@@ -181,47 +144,201 @@ class Controller(wsgi.Controller):
for k, v in env['server']['metadata'].items():
metadata.append({'key': k, 'value': v})
- instances = self.compute_api.create(
- context,
- instance_types.get_by_flavor_id(env['server']['flavorId']),
- image_id,
- kernel_id=kernel_id,
- ramdisk_id=ramdisk_id,
- display_name=env['server']['name'],
- display_description=env['server']['name'],
- key_name=key_pair['name'],
- key_data=key_pair['public_key'],
- metadata=metadata,
- onset_files=env.get('onset_files', []))
- return _translate_keys(instances[0])
+ personality = env['server'].get('personality')
+ injected_files = []
+ if personality:
+ injected_files = self._get_injected_files(personality)
+
+ flavor_id = self._flavor_id_from_req_data(env)
+
+ if not 'name' in env['server']:
+ msg = _("Server name is not defined")
+ return exc.HTTPBadRequest(msg)
+
+ name = env['server']['name']
+ self._validate_server_name(name)
+ name = name.strip()
+ try:
+ (inst,) = self.compute_api.create(
+ context,
+ instance_types.get_by_flavor_id(flavor_id),
+ image_id,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
+ display_name=name,
+ display_description=name,
+ key_name=key_name,
+ key_data=key_data,
+ metadata=metadata,
+ injected_files=injected_files)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+
+ inst['instance_type'] = flavor_id
+ inst['image_id'] = requested_image_id
+
+ builder = self._get_view_builder(req)
+ server = builder.build(inst, is_detail=True)
+ password = utils.generate_password(16)
+ server['server']['adminPass'] = password
+ self.compute_api.set_admin_password(context, server['server']['id'],
+ password)
+ return server
+
+ def _deserialize_create(self, request):
+ """
+ Deserialize a create request
+
+ Overrides normal behavior in the case of xml content
+ """
+ if request.content_type == "application/xml":
+ deserializer = ServerCreateRequestXMLDeserializer()
+ return deserializer.deserialize(request.body)
+ else:
+ return self._deserialize(request.body, request.get_content_type())
+
+ def _get_injected_files(self, personality):
+ """
+ Create a list of injected files from the personality attribute
+
+ At this time, injected_files must be formatted as a list of
+ (file_path, file_content) pairs for compatibility with the
+ underlying compute service.
+ """
+ injected_files = []
+
+ for item in personality:
+ try:
+ path = item['path']
+ contents = item['contents']
+ except KeyError as key:
+ expl = _('Bad personality format: missing %s') % key
+ raise exc.HTTPBadRequest(explanation=expl)
+ except TypeError:
+ expl = _('Bad personality format')
+ raise exc.HTTPBadRequest(explanation=expl)
+ try:
+ contents = base64.b64decode(contents)
+ except TypeError:
+ expl = _('Personality content for %s cannot be decoded') % path
+ raise exc.HTTPBadRequest(explanation=expl)
+ injected_files.append((path, contents))
+ return injected_files
+
+ def _handle_quota_error(self, error):
+ """
+ Reraise quota errors as api-specific http exceptions
+ """
+ if error.code == "OnsetFileLimitExceeded":
+ expl = _("Personality file limit exceeded")
+ raise exc.HTTPBadRequest(explanation=expl)
+ if error.code == "OnsetFilePathLimitExceeded":
+ expl = _("Personality file path too long")
+ raise exc.HTTPBadRequest(explanation=expl)
+ if error.code == "OnsetFileContentLimitExceeded":
+ expl = _("Personality file content too long")
+ raise exc.HTTPBadRequest(explanation=expl)
+ # if the original error is okay, just reraise it
+ raise error
+
+ @scheduler_api.redirect_handler
def update(self, req, id):
""" Updates the server name or password """
- inst_dict = self._deserialize(req.body, req)
+ if len(req.body) == 0:
+ raise exc.HTTPUnprocessableEntity()
+
+ inst_dict = self._deserialize(req.body, req.get_content_type())
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
ctxt = req.environ['nova.context']
update_dict = {}
- if 'adminPass' in inst_dict['server']:
- update_dict['admin_pass'] = inst_dict['server']['adminPass']
- try:
- self.compute_api.set_admin_password(ctxt, id)
- except exception.TimeoutException, e:
- return exc.HTTPRequestTimeout()
+
if 'name' in inst_dict['server']:
- update_dict['display_name'] = inst_dict['server']['name']
+ name = inst_dict['server']['name']
+ self._validate_server_name(name)
+ update_dict['display_name'] = name.strip()
+
+ self._parse_update(ctxt, id, inst_dict, update_dict)
+
try:
self.compute_api.update(ctxt, id, **update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
+
return exc.HTTPNoContent()
+ def _validate_server_name(self, value):
+ if not isinstance(value, basestring):
+ msg = _("Server name is not a string or unicode")
+ raise exc.HTTPBadRequest(msg)
+
+ if value.strip() == '':
+ msg = _("Server name is an empty string")
+ raise exc.HTTPBadRequest(msg)
+
+ def _parse_update(self, context, id, inst_dict, update_dict):
+ pass
+
+ @scheduler_api.redirect_handler
def action(self, req, id):
- """ Multi-purpose method used to reboot, rebuild, and
- resize a server """
- input_dict = self._deserialize(req.body, req)
- #TODO(sandy): rebuild/resize not supported.
+ """Multi-purpose method used to reboot, rebuild, or
+ resize a server"""
+
+ actions = {
+ 'changePassword': self._action_change_password,
+ 'reboot': self._action_reboot,
+ 'resize': self._action_resize,
+ 'confirmResize': self._action_confirm_resize,
+ 'revertResize': self._action_revert_resize,
+ 'rebuild': self._action_rebuild,
+ }
+
+ input_dict = self._deserialize(req.body, req.get_content_type())
+ for key in actions.keys():
+ if key in input_dict:
+ return actions[key](input_dict, req, id)
+ return faults.Fault(exc.HTTPNotImplemented())
+
+ def _action_change_password(self, input_dict, req, id):
+ return exc.HTTPNotImplemented()
+
+ def _action_confirm_resize(self, input_dict, req, id):
+ try:
+ self.compute_api.confirm_resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in confirm-resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPNoContent()
+
+ def _action_revert_resize(self, input_dict, req, id):
+ try:
+ self.compute_api.revert_resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in revert-resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
+
+ def _action_rebuild(self, input_dict, req, id):
+ return faults.Fault(exc.HTTPNotImplemented())
+
+ def _action_resize(self, input_dict, req, id):
+ """ Resizes a given instance to the flavor size requested """
+ try:
+ if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
+ flavor_id = input_dict['resize']['flavorId']
+ self.compute_api.resize(req.environ['nova.context'], id,
+ flavor_id)
+ else:
+ LOG.exception(_("Missing arguments for resize"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ except Exception, e:
+ LOG.exception(_("Error in resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return faults.Fault(exc.HTTPAccepted())
+
+ def _action_reboot(self, input_dict, req, id):
try:
reboot_type = input_dict['reboot']['type']
except Exception:
@@ -234,6 +351,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def lock(self, req, id):
"""
lock the instance with id
@@ -249,6 +367,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def unlock(self, req, id):
"""
unlock the instance with id
@@ -264,6 +383,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def get_lock(self, req, id):
"""
return the boolean state of (instance with id)'s lock
@@ -278,6 +398,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def reset_network(self, req, id):
"""
Reset networking on an instance (admin only).
@@ -292,6 +413,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def inject_network_info(self, req, id):
"""
Inject network info for an instance (admin only).
@@ -306,6 +428,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def pause(self, req, id):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
@@ -317,6 +440,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def unpause(self, req, id):
""" Permit Admins to Unpause the server. """
ctxt = req.environ['nova.context']
@@ -328,6 +452,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def suspend(self, req, id):
"""permit admins to suspend the server"""
context = req.environ['nova.context']
@@ -339,6 +464,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def resume(self, req, id):
"""permit admins to resume the server from suspend"""
context = req.environ['nova.context']
@@ -350,8 +476,33 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
+ def rescue(self, req, id):
+ """Permit users to rescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.rescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::rescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
+ def unrescue(self, req, id):
+ """Permit users to unrescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.unrescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::unrescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
def get_ajax_console(self, req, id):
- """ Returns a url to an instance's ajaxterm console. """
+ """Returns a url to an instance's ajaxterm console."""
try:
self.compute_api.get_ajax_console(req.environ['nova.context'],
int(id))
@@ -359,6 +510,17 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
+ def get_vnc_console(self, req, id):
+ """Returns a url to an instance's ajaxterm console."""
+ try:
+ self.compute_api.get_vnc_console(req.environ['nova.context'],
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
def diagnostics(self, req, id):
"""Permit Admins to retrieve server diagnostics."""
ctxt = req.environ["nova.context"]
@@ -377,3 +539,187 @@ class Controller(wsgi.Controller):
action=item.action,
error=item.error))
return dict(actions=actions)
+
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """Fetch an image from the ImageService, then if present, return the
+ associated kernel and ramdisk image IDs.
+ """
+ context = req.environ['nova.context']
+ image_meta = self._image_service.show(context, image_id)
+ # NOTE(sirp): extracted to a separate method to aid unit-testing, the
+ # new method doesn't need a request obj or an ImageService stub
+ kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
+ image_meta)
+ return kernel_id, ramdisk_id
+
+ @staticmethod
+ def _do_get_kernel_ramdisk_from_image(image_meta):
+ """Given an ImageService image_meta, return kernel and ramdisk image
+ ids if present.
+
+ This is only valid for `ami` style images.
+ """
+ image_id = image_meta['id']
+ if image_meta['status'] != 'active':
+ raise exception.Invalid(
+ _("Cannot build from image %(image_id)s, status not active") %
+ locals())
+
+ if image_meta['properties']['disk_format'] != 'ami':
+ return None, None
+
+ try:
+ kernel_id = image_meta['properties']['kernel_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Kernel not found for image %(image_id)s") % locals())
+
+ try:
+ ramdisk_id = image_meta['properties']['ramdisk_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Ramdisk not found for image %(image_id)s") % locals())
+
+ return kernel_id, ramdisk_id
+
+
+class ControllerV10(Controller):
+ def _image_id_from_req_data(self, data):
+ return data['server']['imageId']
+
+ def _flavor_id_from_req_data(self, data):
+ return data['server']['flavorId']
+
+ def _get_view_builder(self, req):
+ addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV10()
+ return nova.api.openstack.views.servers.ViewBuilderV10(
+ addresses_builder)
+
+ def _get_addresses_view_builder(self, req):
+ return nova.api.openstack.views.addresses.ViewBuilderV10(req)
+
+ def _limit_items(self, items, req):
+ return common.limited(items, req)
+
+ def _parse_update(self, context, server_id, inst_dict, update_dict):
+ if 'adminPass' in inst_dict['server']:
+ update_dict['admin_pass'] = inst_dict['server']['adminPass']
+ try:
+ self.compute_api.set_admin_password(context, server_id)
+ except exception.TimeoutException:
+ return exc.HTTPRequestTimeout()
+
+
+class ControllerV11(Controller):
+ def _image_id_from_req_data(self, data):
+ href = data['server']['imageRef']
+ return common.get_id_from_href(href)
+
+ def _flavor_id_from_req_data(self, data):
+ href = data['server']['flavorRef']
+ return common.get_id_from_href(href)
+
+ def _get_view_builder(self, req):
+ base_url = req.application_url
+ flavor_builder = nova.api.openstack.views.flavors.ViewBuilderV11(
+ base_url)
+ image_builder = nova.api.openstack.views.images.ViewBuilderV11(
+ base_url)
+ addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV11()
+ return nova.api.openstack.views.servers.ViewBuilderV11(
+ addresses_builder, flavor_builder, image_builder, base_url)
+
+ def _get_addresses_view_builder(self, req):
+ return nova.api.openstack.views.addresses.ViewBuilderV11(req)
+
+ def _action_change_password(self, input_dict, req, id):
+ context = req.environ['nova.context']
+ if (not 'changePassword' in input_dict
+ or not 'adminPass' in input_dict['changePassword']):
+ msg = _("No adminPass was specified")
+ return exc.HTTPBadRequest(msg)
+ password = input_dict['changePassword']['adminPass']
+ if not isinstance(password, basestring) or password == '':
+ msg = _("Invalid adminPass")
+ return exc.HTTPBadRequest(msg)
+ self.compute_api.set_admin_password(context, id, password)
+ return exc.HTTPAccepted()
+
+ def _limit_items(self, items, req):
+ return common.limited_by_marker(items, req)
+
+
+class ServerCreateRequestXMLDeserializer(object):
+ """
+ Deserializer to handle xml-formatted server create requests.
+
+ Handles standard server attributes as well as optional metadata
+ and personality attributes
+ """
+
+ def deserialize(self, string):
+ """Deserialize an xml-formatted server create request"""
+ dom = minidom.parseString(string)
+ server = self._extract_server(dom)
+ return {'server': server}
+
+ def _extract_server(self, node):
+ """Marshal the server attribute of a parsed request"""
+ server = {}
+ server_node = self._find_first_child_named(node, 'server')
+ for attr in ["name", "imageId", "flavorId"]:
+ server[attr] = server_node.getAttribute(attr)
+ metadata = self._extract_metadata(server_node)
+ if metadata is not None:
+ server["metadata"] = metadata
+ personality = self._extract_personality(server_node)
+ if personality is not None:
+ server["personality"] = personality
+ return server
+
+ def _extract_metadata(self, server_node):
+ """Marshal the metadata attribute of a parsed request"""
+ metadata_node = self._find_first_child_named(server_node, "metadata")
+ if metadata_node is None:
+ return None
+ metadata = {}
+ for meta_node in self._find_children_named(metadata_node, "meta"):
+ key = meta_node.getAttribute("key")
+ metadata[key] = self._extract_text(meta_node)
+ return metadata
+
+ def _extract_personality(self, server_node):
+ """Marshal the personality attribute of a parsed request"""
+ personality_node = \
+ self._find_first_child_named(server_node, "personality")
+ if personality_node is None:
+ return None
+ personality = []
+ for file_node in self._find_children_named(personality_node, "file"):
+ item = {}
+ if file_node.hasAttribute("path"):
+ item["path"] = file_node.getAttribute("path")
+ item["contents"] = self._extract_text(file_node)
+ personality.append(item)
+ return personality
+
+ def _find_first_child_named(self, parent, name):
+ """Search a nodes children for the first child with a given name"""
+ for node in parent.childNodes:
+ if node.nodeName == name:
+ return node
+ return None
+
+ def _find_children_named(self, parent, name):
+ """Return all of a nodes children who have the given name"""
+ for node in parent.childNodes:
+ if node.nodeName == name:
+ yield node
+
+ def _extract_text(self, node):
+ """Get the text field contained by the given node"""
+ if len(node.childNodes) == 1:
+ child = node.childNodes[0]
+ if child.nodeType == child.TEXT_NODE:
+ return child.nodeValue
+ return ""
diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py
index 5d78f9377..ee7991d7f 100644
--- a/nova/api/openstack/shared_ip_groups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -42,11 +42,11 @@ class Controller(wsgi.Controller):
def index(self, req):
""" Returns a list of Shared IP Groups for the user """
- return dict(sharedIpGroups=[])
+ raise faults.Fault(exc.HTTPNotImplemented())
def show(self, req, id):
""" Shows in-depth information on a specific Shared IP Group """
- return _translate_keys({})
+ raise faults.Fault(exc.HTTPNotImplemented())
def update(self, req, id):
""" You can't update a Shared IP Group """
@@ -58,7 +58,7 @@ class Controller(wsgi.Controller):
def detail(self, req):
""" Returns a complete list of Shared IP Groups """
- return _translate_detail_keys({})
+ raise faults.Fault(exc.HTTPNotImplemented())
def create(self, req):
""" Creates a new Shared IP group """
diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py
new file mode 100644
index 000000000..d3ab3d553
--- /dev/null
+++ b/nova/api/openstack/users.py
@@ -0,0 +1,104 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+from nova.api.openstack import common
+from nova.api.openstack import faults
+from nova.auth import manager
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.api.openstack')
+
+
+def _translate_keys(user):
+ return dict(id=user.id,
+ name=user.name,
+ access=user.access,
+ secret=user.secret,
+ admin=user.admin)
+
+
+class Controller(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "user": ["id", "name", "access", "secret", "admin"]}}}
+
+ def __init__(self):
+ self.manager = manager.AuthManager()
+
+ def _check_admin(self, context):
+ """We cannot depend on the db layer to check for admin access
+ for the auth manager, so we do it here"""
+ if not context.is_admin:
+ raise exception.NotAuthorized(_("Not admin user"))
+
+ def index(self, req):
+ """Return all users in brief"""
+ users = self.manager.get_users()
+ users = common.limited(users, req)
+ users = [_translate_keys(user) for user in users]
+ return dict(users=users)
+
+ def detail(self, req):
+ """Return all users in detail"""
+ return self.index(req)
+
+ def show(self, req, id):
+ """Return data about the given user id"""
+
+ #NOTE(justinsb): The drivers are a little inconsistent in how they
+ # deal with "NotFound" - some throw, some return None.
+ try:
+ user = self.manager.get_user(id)
+ except exception.NotFound:
+ user = None
+
+ if user is None:
+ raise faults.Fault(exc.HTTPNotFound())
+
+ return dict(user=_translate_keys(user))
+
+ def delete(self, req, id):
+ self._check_admin(req.environ['nova.context'])
+ self.manager.delete_user(id)
+ return {}
+
+ def create(self, req):
+ self._check_admin(req.environ['nova.context'])
+ env = self._deserialize(req.body, req.get_content_type())
+ is_admin = env['user'].get('admin') in ('T', 'True', True)
+ name = env['user'].get('name')
+ access = env['user'].get('access')
+ secret = env['user'].get('secret')
+ user = self.manager.create_user(name, access, secret, is_admin)
+ return dict(user=_translate_keys(user))
+
+ def update(self, req, id):
+ self._check_admin(req.environ['nova.context'])
+ env = self._deserialize(req.body, req.get_content_type())
+ is_admin = env['user'].get('admin')
+ if is_admin is not None:
+ is_admin = is_admin in ('T', 'True', True)
+ access = env['user'].get('access')
+ secret = env['user'].get('secret')
+ self.manager.modify_user(id, access, secret, is_admin)
+ return dict(user=_translate_keys(self.manager.get_user(id)))
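
For reference, a request body accepted by the users controller's create() above; note that the admin flag is treated as true only for 'T', 'True', or boolean True (the field values are examples):

create_user_body = {
    "user": {
        "name": "alice",
        "access": "alice-access-key",
        "secret": "alice-secret-key",
        "admin": "True",
    }
}
# create() returns the new user via _translate_keys(), i.e. a dict with the
# keys id, name, access, secret and admin.
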
diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py
new file mode 100644
index 000000000..3f9d91934
--- /dev/null
+++ b/nova/api/openstack/versions.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+import webob.dec
+
+from nova import wsgi
+import nova.api.openstack.views.versions
+
+
+class Versions(wsgi.Application):
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ """Respond to a request for all OpenStack API versions."""
+ version_objs = [
+ {
+ "id": "v1.1",
+ "status": "CURRENT",
+ },
+ {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ },
+ ]
+
+ builder = nova.api.openstack.views.versions.get_view_builder(req)
+ versions = [builder.build(version) for version in version_objs]
+ response = dict(versions=versions)
+
+ metadata = {
+ "application/xml": {
+ "attributes": {
+ "version": ["status", "id"],
+ "link": ["rel", "href"],
+ }
+ }
+ }
+
+ content_type = req.best_match_content_type()
+ body = wsgi.Serializer(metadata).serialize(response, content_type)
+
+ response = webob.Response()
+ response.content_type = content_type
+ response.body = body
+
+ return response
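
Combined with the versions view builder added later in this patch, the application above produces a body of the following shape (the host and port are examples):

versions_response = {
    "versions": [
        {"id": "v1.1", "status": "CURRENT",
         "links": [{"rel": "self", "href": "http://localhost:8774/v1.1"}]},
        {"id": "v1.0", "status": "DEPRECATED",
         "links": [{"rel": "self", "href": "http://localhost:8774/v1.0"}]},
    ]
}
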
diff --git a/nova/api/openstack/views/__init__.py b/nova/api/openstack/views/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/api/openstack/views/__init__.py
diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py
new file mode 100644
index 000000000..90c77855b
--- /dev/null
+++ b/nova/api/openstack/views/addresses.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import utils
+from nova.api.openstack import common
+
+
+class ViewBuilder(object):
+ """Models a server addresses response as a python dictionary."""
+
+ def build(self, inst):
+ raise NotImplementedError()
+
+
+class ViewBuilderV10(ViewBuilder):
+ def build(self, inst):
+ private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+ public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ return dict(public=public_ips, private=private_ips)
+
+
+class ViewBuilderV11(ViewBuilder):
+ def build(self, inst):
+ private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+ private_ips = [dict(version=4, addr=a) for a in private_ips]
+ public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ public_ips = [dict(version=4, addr=a) for a in public_ips]
+ return dict(public=public_ips, private=private_ips)
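
The only difference between the two builders is the shape of each entry: v1.0 returns bare address strings, while v1.1 wraps each address with its IP version. Illustrative output for a server with one fixed and one floating IP (addresses made up):

v10_addresses = {"public": ["10.0.0.3"], "private": ["192.168.0.3"]}

v11_addresses = {"public": [{"version": 4, "addr": "10.0.0.3"}],
                 "private": [{"version": 4, "addr": "192.168.0.3"}]}
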
diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py
new file mode 100644
index 000000000..462890ab2
--- /dev/null
+++ b/nova/api/openstack/views/flavors.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import common
+
+
+class ViewBuilder(object):
+
+ def build(self, flavor_obj, is_detail=False):
+ """Generic method used to generate a flavor entity."""
+ if is_detail:
+ flavor = self._build_detail(flavor_obj)
+ else:
+ flavor = self._build_simple(flavor_obj)
+
+ self._build_extra(flavor)
+
+ return flavor
+
+ def _build_simple(self, flavor_obj):
+ """Build a minimal representation of a flavor."""
+ return {
+ "id": flavor_obj["flavorid"],
+ "name": flavor_obj["name"],
+ }
+
+ def _build_detail(self, flavor_obj):
+ """Build a more complete representation of a flavor."""
+ simple = self._build_simple(flavor_obj)
+
+ detail = {
+ "ram": flavor_obj["memory_mb"],
+ "disk": flavor_obj["local_gb"],
+ }
+
+ detail.update(simple)
+
+ return detail
+
+ def _build_extra(self, flavor_obj):
+ """Hook for version-specific changes to newly created flavor object."""
+ pass
+
+
+class ViewBuilderV11(ViewBuilder):
+ """Openstack API v1.1 flavors view builder."""
+
+ def __init__(self, base_url):
+ """
+ :param base_url: url of the root wsgi application
+ """
+ self.base_url = base_url
+
+ def _build_extra(self, flavor_obj):
+ flavor_obj["links"] = self._build_links(flavor_obj)
+
+ def _build_links(self, flavor_obj):
+ """Generate a container of links that refer to the provided flavor."""
+ href = self.generate_href(flavor_obj["id"])
+
+ links = [
+ {
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ },
+ ]
+
+ return links
+
+ def generate_href(self, flavor_id):
+ """Create an url that refers to a specific flavor id."""
+ return "%s/flavors/%s" % (self.base_url, flavor_id)
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
new file mode 100644
index 000000000..16195b050
--- /dev/null
+++ b/nova/api/openstack/views/images.py
@@ -0,0 +1,114 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os.path
+
+
+class ViewBuilder(object):
+ """Base class for generating responses to OpenStack API image requests."""
+
+ def __init__(self, base_url):
+ """Initialize new `ViewBuilder`."""
+ self._url = base_url
+
+ def _format_dates(self, image):
+ """Update all date fields to ensure standardized formatting."""
+ for attr in ['created_at', 'updated_at', 'deleted_at']:
+ if image.get(attr) is not None:
+ image[attr] = image[attr].strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ def _format_status(self, image):
+ """Update the status field to standardize format."""
+ status_mapping = {
+ 'pending': 'queued',
+ 'decrypting': 'preparing',
+ 'untarring': 'saving',
+ 'available': 'active',
+ 'killed': 'failed',
+ }
+
+ try:
+ image['status'] = status_mapping[image['status']].upper()
+ except KeyError:
+ image['status'] = image['status'].upper()
+
+ def generate_href(self, image_id):
+ """Return an href string pointing to this object."""
+ return os.path.join(self._url, "images", str(image_id))
+
+ def build(self, image_obj, detail=False):
+ """Return a standardized image structure for display by the API."""
+ properties = image_obj.get("properties", {})
+
+ self._format_dates(image_obj)
+
+ if "status" in image_obj:
+ self._format_status(image_obj)
+
+ image = {
+ "id": image_obj.get("id"),
+ "name": image_obj.get("name"),
+ }
+
+ if "instance_id" in properties:
+ try:
+ image["serverId"] = int(properties["instance_id"])
+ except ValueError:
+ pass
+
+ if detail:
+ image.update({
+ "created": image_obj.get("created_at"),
+ "updated": image_obj.get("updated_at"),
+ "status": image_obj.get("status"),
+ })
+
+ if image["status"] == "SAVING":
+ image["progress"] = 0
+
+ return image
+
+
+class ViewBuilderV10(ViewBuilder):
+ """OpenStack API v1.0 Image Builder"""
+ pass
+
+
+class ViewBuilderV11(ViewBuilder):
+ """OpenStack API v1.1 Image Builder"""
+
+ def build(self, image_obj, detail=False):
+ """Return a standardized image structure for display by the API."""
+ image = ViewBuilder.build(self, image_obj, detail)
+ href = self.generate_href(image_obj["id"])
+
+ image["links"] = [{
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ }]
+
+ return image
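
A sketch of the image view in use, showing the date and status normalization; it assumes the patched nova.api.openstack.views.images module is importable, and the image record is made up:

from datetime import datetime
from nova.api.openstack.views.images import ViewBuilderV10

image_obj = {"id": 5, "name": "tty-linux", "status": "available",
             "created_at": datetime(2011, 4, 5, 12, 0, 0), "properties": {}}
image = ViewBuilderV10("http://localhost:8774/v1.0").build(image_obj, detail=True)
# image == {"id": 5, "name": "tty-linux", "status": "ACTIVE",
#           "created": "2011-04-05T12:00:00Z", "updated": None}
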
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
new file mode 100644
index 000000000..d24c025be
--- /dev/null
+++ b/nova/api/openstack/views/servers.py
@@ -0,0 +1,168 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import os
+
+from nova.compute import power_state
+import nova.compute
+import nova.context
+from nova.api.openstack import common
+from nova.api.openstack.views import addresses as addresses_view
+from nova.api.openstack.views import flavors as flavors_view
+from nova.api.openstack.views import images as images_view
+from nova import utils
+
+
+class ViewBuilder(object):
+ """Model a server response as a python dictionary.
+
+ Public methods: build
+ Abstract methods: _build_image, _build_flavor
+
+ """
+
+ def __init__(self, addresses_builder):
+ self.addresses_builder = addresses_builder
+
+ def build(self, inst, is_detail):
+ """Return a dict that represenst a server."""
+ if is_detail:
+ server = self._build_detail(inst)
+ else:
+ server = self._build_simple(inst)
+
+ self._build_extra(server, inst)
+
+ return server
+
+ def _build_simple(self, inst):
+ """Return a simple model of a server."""
+ return dict(server=dict(id=inst['id'], name=inst['display_name']))
+
+ def _build_detail(self, inst):
+ """Returns a detailed model of a server."""
+ power_mapping = {
+ None: 'BUILD',
+ power_state.NOSTATE: 'BUILD',
+ power_state.RUNNING: 'ACTIVE',
+ power_state.BLOCKED: 'ACTIVE',
+ power_state.SUSPENDED: 'SUSPENDED',
+ power_state.PAUSED: 'PAUSED',
+ power_state.SHUTDOWN: 'ACTIVE',
+ power_state.SHUTOFF: 'ACTIVE',
+ power_state.CRASHED: 'ERROR',
+ power_state.FAILED: 'ERROR'}
+
+ inst_dict = {
+ 'id': int(inst['id']),
+ 'name': inst['display_name'],
+ 'addresses': self.addresses_builder.build(inst),
+ 'status': power_mapping[inst.get('state')]}
+
+ ctxt = nova.context.get_admin_context()
+ compute_api = nova.compute.API()
+ if compute_api.has_finished_migration(ctxt, inst['id']):
+ inst_dict['status'] = 'RESIZE-CONFIRM'
+
+ # Return the metadata as a dictionary
+ metadata = {}
+ for item in inst.get('metadata', []):
+ metadata[item['key']] = item['value']
+ inst_dict['metadata'] = metadata
+
+ inst_dict['hostId'] = ''
+ if inst.get('host'):
+ inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest()
+
+ self._build_image(inst_dict, inst)
+ self._build_flavor(inst_dict, inst)
+
+ return dict(server=inst_dict)
+
+ def _build_image(self, response, inst):
+ """Return the image sub-resource of a server."""
+ raise NotImplementedError()
+
+ def _build_flavor(self, response, inst):
+ """Return the flavor sub-resource of a server."""
+ raise NotImplementedError()
+
+ def _build_extra(self, response, inst):
+ pass
+
+
+class ViewBuilderV10(ViewBuilder):
+ """Model an Openstack API V1.0 server response."""
+
+ def _build_image(self, response, inst):
+ if 'image_id' in dict(inst):
+ response['imageId'] = inst['image_id']
+
+ def _build_flavor(self, response, inst):
+ if 'instance_type' in dict(inst):
+ response['flavorId'] = inst['instance_type']
+
+
+class ViewBuilderV11(ViewBuilder):
+ """Model an Openstack API V1.0 server response."""
+ def __init__(self, addresses_builder, flavor_builder, image_builder,
+ base_url):
+ ViewBuilder.__init__(self, addresses_builder)
+ self.flavor_builder = flavor_builder
+ self.image_builder = image_builder
+ self.base_url = base_url
+
+ def _build_image(self, response, inst):
+ if "image_id" in dict(inst):
+ image_id = inst.get("image_id")
+ response["imageRef"] = self.image_builder.generate_href(image_id)
+
+ def _build_flavor(self, response, inst):
+ if "instance_type" in dict(inst):
+ flavor_id = inst["instance_type"]
+ flavor_ref = self.flavor_builder.generate_href(flavor_id)
+ response["flavorRef"] = flavor_ref
+
+ def _build_extra(self, response, inst):
+ self._build_links(response, inst)
+
+ def _build_links(self, response, inst):
+ href = self.generate_href(inst["id"])
+
+ links = [
+ {
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ },
+ ]
+
+ response["server"]["links"] = links
+
+ def generate_href(self, server_id):
+ """Create an url that refers to a specific server id."""
+ return os.path.join(self.base_url, "servers", str(server_id))
diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py
new file mode 100644
index 000000000..d0145c94a
--- /dev/null
+++ b/nova/api/openstack/views/versions.py
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+
+def get_view_builder(req):
+ base_url = req.application_url
+ return ViewBuilder(base_url)
+
+
+class ViewBuilder(object):
+
+ def __init__(self, base_url):
+ """
+ :param base_url: url of the root wsgi application
+ """
+ self.base_url = base_url
+
+ def build(self, version_data):
+ """Generic method used to generate a version entity."""
+ version = {
+ "id": version_data["id"],
+ "status": version_data["status"],
+ "links": self._build_links(version_data),
+ }
+
+ return version
+
+ def _build_links(self, version_data):
+ """Generate a container of links that refer to the provided version."""
+ href = self.generate_href(version_data["id"])
+
+ links = [
+ {
+ "rel": "self",
+ "href": href,
+ },
+ ]
+
+ return links
+
+ def generate_href(self, version_number):
+ """Create an url that refers to a specific version_number."""
+ return os.path.join(self.base_url, version_number)
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
index d5206da20..846cb48a1 100644
--- a/nova/api/openstack/zones.py
+++ b/nova/api/openstack/zones.py
@@ -1,4 +1,4 @@
-# Copyright 2010 OpenStack LLC.
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,9 +15,11 @@
import common
+from nova import db
from nova import flags
+from nova import log as logging
from nova import wsgi
-from nova import db
+from nova.scheduler import api
FLAGS = flags.FLAGS
@@ -32,8 +34,13 @@ def _filter_keys(item, keys):
return dict((k, v) for k, v in item.iteritems() if k in keys)
+def _exclude_keys(item, keys):
+ return dict((k, v) for k, v in item.iteritems() if k not in keys)
+
+
def _scrub_zone(zone):
- return _filter_keys(zone, ('id', 'api_url'))
+ return _exclude_keys(zone, ('username', 'password', 'created_at',
+ 'deleted', 'deleted_at', 'updated_at'))
class Controller(wsgi.Controller):
@@ -41,11 +48,13 @@ class Controller(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
- "zone": ["id", "api_url"]}}}
+ "zone": ["id", "api_url", "name", "capabilities"]}}}
def index(self, req):
"""Return all zones in brief"""
- items = db.zone_get_all(req.environ['nova.context'])
+ # Ask the ZoneManager in the Scheduler for most recent data,
+ # or fall-back to the database ...
+ items = api.get_zone_list(req.environ['nova.context'])
items = common.limited(items, req)
items = [_scrub_zone(item) for item in items]
return dict(zones=items)
@@ -54,26 +63,39 @@ class Controller(wsgi.Controller):
"""Return all zones in detail"""
return self.index(req)
+ def info(self, req):
+ """Return name and capabilities for this zone."""
+ items = api.get_zone_capabilities(req.environ['nova.context'])
+
+ zone = dict(name=FLAGS.zone_name)
+ caps = FLAGS.zone_capabilities
+ for cap in caps:
+ key, value = cap.split('=')
+ zone[key] = value
+ for item, (min_value, max_value) in items.iteritems():
+ zone[item] = "%s,%s" % (min_value, max_value)
+ return dict(zone=zone)
+
def show(self, req, id):
"""Return data about the given zone id"""
zone_id = int(id)
- zone = db.zone_get(req.environ['nova.context'], zone_id)
+ zone = api.zone_get(req.environ['nova.context'], zone_id)
return dict(zone=_scrub_zone(zone))
def delete(self, req, id):
zone_id = int(id)
- db.zone_delete(req.environ['nova.context'], zone_id)
+ api.zone_delete(req.environ['nova.context'], zone_id)
return {}
def create(self, req):
context = req.environ['nova.context']
- env = self._deserialize(req.body, req)
- zone = db.zone_create(context, env["zone"])
+ env = self._deserialize(req.body, req.get_content_type())
+ zone = api.zone_create(context, env["zone"])
return dict(zone=_scrub_zone(zone))
def update(self, req, id):
context = req.environ['nova.context']
- env = self._deserialize(req.body, req)
+ env = self._deserialize(req.body, req.get_content_type())
zone_id = int(id)
- zone = db.zone_update(context, zone_id, env["zone"])
+ zone = api.zone_update(context, zone_id, env["zone"])
return dict(zone=_scrub_zone(zone))
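
The switch from _filter_keys to _exclude_keys changes _scrub_zone from whitelisting two fields to returning everything except credentials and bookkeeping columns; a small standalone illustration (the zone record is made up):

def _exclude_keys(item, keys):
    return dict((k, v) for k, v in item.items() if k not in keys)

zone = {"id": 1, "api_url": "http://zone1:8774/", "name": "zone1",
        "username": "admin", "password": "secret", "deleted": False}

print(_exclude_keys(zone, ("username", "password", "created_at",
                           "deleted", "deleted_at", "updated_at")))
# {'id': 1, 'api_url': 'http://zone1:8774/', 'name': 'zone1'}
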
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index d8dad8edd..d1e3f2ed5 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -162,6 +162,8 @@ class DbDriver(object):
values['description'] = description
db.project_update(context.get_admin_context(), project_id, values)
+ if not self.is_in_project(manager_uid, project_id):
+ self.add_to_project(manager_uid, project_id)
def add_to_project(self, uid, project_id):
"""Add user to project"""
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 4466051f0..79afb9109 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -90,12 +90,12 @@ MOD_DELETE = 1
MOD_REPLACE = 2
-class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
+class NO_SUCH_OBJECT(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
-class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
+class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
@@ -268,7 +268,7 @@ class FakeLDAP(object):
# get the attributes from the store
attrs = store.hgetall(key)
# turn the values from the store into lists
- # pylint: disable-msg=E1103
+ # pylint: disable=E1103
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
@@ -277,12 +277,12 @@ class FakeLDAP(object):
attrs = dict([(k, v) for k, v in attrs.iteritems()
if not fields or k in fields])
objects.append((key[len(self.__prefix):], attrs))
- # pylint: enable-msg=E1103
+ # pylint: enable=E1103
if objects == []:
raise NO_SUCH_OBJECT()
return objects
@property
- def __prefix(self): # pylint: disable-msg=R0201
+ def __prefix(self): # pylint: disable=R0201
"""Get the prefix to use for all keys."""
return 'ldap:'
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 5da7751a0..fcac55510 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -275,6 +275,8 @@ class LdapDriver(object):
attr.append((self.ldap.MOD_REPLACE, 'description', description))
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
+ if not self.is_in_project(manager_uid, project_id):
+ self.add_to_project(manager_uid, project_id)
@sanitize
def add_to_project(self, uid, project_id):
@@ -632,6 +634,6 @@ class LdapDriver(object):
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
- def __init__(self): # pylint: disable-msg=W0231
+ def __init__(self): # pylint: disable=W0231
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 84c8a6cb2..12ded1207 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -22,7 +22,7 @@ Nova authentication management
import os
import shutil
-import string # pylint: disable-msg=W0402
+import string # pylint: disable=W0402
import tempfile
import uuid
import zipfile
@@ -96,10 +96,19 @@ class AuthBase(object):
class User(AuthBase):
- """Object representing a user"""
+ """Object representing a user
+
+ The following attributes are defined:
+ :id: A system identifier for the user. A string (for LDAP)
+ :name: The user name, potentially in some more friendly format
+ :access: The 'username' for EC2 authentication
+    :secret: The 'password' for EC2 authentication
+ :admin: ???
+ """
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
+ assert isinstance(id, basestring)
self.id = id
self.name = name
self.access = access
diff --git a/nova/compute/api.py b/nova/compute/api.py
index c475e3bff..996955fe3 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -34,12 +34,17 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
+from nova.scheduler import api as scheduler_api
from nova.db import base
-FLAGS = flags.FLAGS
+
LOG = logging.getLogger('nova.compute.api')
+FLAGS = flags.FLAGS
+flags.DECLARE('vncproxy_topic', 'nova.vnc')
+
+
def generate_default_hostname(instance_id):
"""Default function to generate a hostname given an instance reference."""
return str(instance_id)
@@ -80,17 +85,36 @@ class API(base.Base):
topic,
{"method": "get_network_topic", "args": {'fake': 1}})
+ def _check_injected_file_quota(self, context, injected_files):
+ """
+ Enforce quota limits on injected files
+
+ Raises a QuotaError if any limit is exceeded
+ """
+ if injected_files is None:
+ return
+ limit = quota.allowed_injected_files(context)
+ if len(injected_files) > limit:
+ raise quota.QuotaError(code="OnsetFileLimitExceeded")
+ path_limit = quota.allowed_injected_file_path_bytes(context)
+ content_limit = quota.allowed_injected_file_content_bytes(context)
+ for path, content in injected_files:
+ if len(path) > path_limit:
+ raise quota.QuotaError(code="OnsetFilePathLimitExceeded")
+ if len(content) > content_limit:
+ raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
+
def create(self, context, instance_type,
image_id, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata=[],
- onset_files=None):
+ injected_files=None):
"""Create the number of instances requested if quota and
- other arguments check out ok.
- """
- type_data = instance_types.INSTANCE_TYPES[instance_type]
+ other arguments check out ok."""
+
+ type_data = instance_types.get_instance_type(instance_type)
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
pid = context.project_id
@@ -124,11 +148,19 @@ class API(base.Base):
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
+ self._check_injected_file_quota(context, injected_files)
+
image = self.image_service.show(context, image_id)
+
+ os_type = None
+ if 'properties' in image and 'os_type' in image['properties']:
+ os_type = image['properties']['os_type']
+
if kernel_id is None:
- kernel_id = image.get('kernel_id', None)
+ kernel_id = image['properties'].get('kernel_id', None)
if ramdisk_id is None:
- ramdisk_id = image.get('ramdisk_id', None)
+ ramdisk_id = image['properties'].get('ramdisk_id', None)
+ # FIXME(sirp): is there a way we can remove null_kernel?
# No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
@@ -164,6 +196,7 @@ class API(base.Base):
'image_id': image_id,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
+ 'state': 0,
'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
@@ -179,7 +212,8 @@ class API(base.Base):
'key_data': key_data,
'locked': False,
'metadata': metadata,
- 'availability_zone': availability_zone}
+ 'availability_zone': availability_zone,
+ 'os_type': os_type}
elevated = context.elevated()
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
@@ -217,13 +251,23 @@ class API(base.Base):
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
"availability_zone": availability_zone,
- "onset_files": onset_files}})
+ "injected_files": injected_files}})
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
return [dict(x.iteritems()) for x in instances]
+ def has_finished_migration(self, context, instance_id):
+ """Retrieves whether or not a finished migration exists for
+ an instance"""
+ try:
+ db.migration_get_by_instance_and_status(context, instance_id,
+ 'finished')
+ return True
+ except exception.NotFound:
+ return False
+
def ensure_default_security_group(self, context):
""" Create security group for the security context if it
does not already exist
@@ -313,17 +357,18 @@ class API(base.Base):
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
+ @scheduler_api.reroute_compute("delete")
def delete(self, context, instance_id):
LOG.debug(_("Going to try to terminate %s"), instance_id)
try:
instance = self.get(context, instance_id)
except exception.NotFound:
- LOG.warning(_("Instance %d was not found during terminate"),
+ LOG.warning(_("Instance %s was not found during terminate"),
instance_id)
raise
if (instance['state_description'] == 'terminating'):
- LOG.warning(_("Instance %d is already being terminated"),
+ LOG.warning(_("Instance %s is already being terminated"),
instance_id)
return
@@ -345,24 +390,37 @@ class API(base.Base):
rv = self.db.instance_get(context, instance_id)
return dict(rv.iteritems())
+ @scheduler_api.reroute_compute("get")
+ def routing_get(self, context, instance_id):
+ """Use this method instead of get() if this is the only
+        operation you intend to do. It will route to novaclient.get
+ if the instance is not found."""
+ return self.get(context, instance_id)
+
def get_all(self, context, project_id=None, reservation_id=None,
fixed_ip=None):
"""Get all instances, possibly filtered by one of the
given parameters. If there is no filter and the context is
- an admin, it will retreive all instances in the system."""
+        an admin, it will retrieve all instances in the system.
+ """
if reservation_id is not None:
- return self.db.instance_get_all_by_reservation(context,
- reservation_id)
+ return self.db.instance_get_all_by_reservation(
+ context, reservation_id)
+
if fixed_ip is not None:
return self.db.fixed_ip_get_instance(context, fixed_ip)
+
if project_id or not context.is_admin:
if not context.project:
- return self.db.instance_get_all_by_user(context,
- context.user_id)
+ return self.db.instance_get_all_by_user(
+ context, context.user_id)
+
if project_id is None:
project_id = context.project_id
- return self.db.instance_get_all_by_project(context,
- project_id)
+
+ return self.db.instance_get_all_by_project(
+ context, project_id)
+
return self.db.instance_get_all(context)
def _cast_compute_message(self, method, context, instance_id, host=None,
@@ -403,30 +461,105 @@ class API(base.Base):
kwargs = {'method': method, 'args': params}
return rpc.call(context, queue, kwargs)
+ def _cast_scheduler_message(self, context, args):
+ """Generic handler for RPC calls to the scheduler"""
+ rpc.cast(context, FLAGS.scheduler_topic, args)
+
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance.
:retval: A dict containing image metadata
"""
- data = {'name': name, 'is_public': False}
- image_meta = self.image_service.create(context, data)
- params = {'image_id': image_meta['id']}
+ properties = {'instance_id': str(instance_id),
+ 'user_id': str(context.user_id)}
+ sent_meta = {'name': name, 'is_public': False,
+ 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+ params = {'image_id': recv_meta['id']}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
- return image_meta
+ return recv_meta
def reboot(self, context, instance_id):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
+ def revert_resize(self, context, instance_id):
+ """Reverts a resize, deleting the 'new' instance in the process"""
+ context = context.elevated()
+ migration_ref = self.db.migration_get_by_instance_and_status(context,
+ instance_id, 'finished')
+ if not migration_ref:
+ raise exception.NotFound(_("No finished migrations found for "
+ "instance"))
+
+ params = {'migration_id': migration_ref['id']}
+ self._cast_compute_message('revert_resize', context, instance_id,
+ migration_ref['dest_compute'], params=params)
+ self.db.migration_update(context, migration_ref['id'],
+ {'status': 'reverted'})
+
+ def confirm_resize(self, context, instance_id):
+ """Confirms a migration/resize, deleting the 'old' instance in the
+ process."""
+ context = context.elevated()
+ migration_ref = self.db.migration_get_by_instance_and_status(context,
+ instance_id, 'finished')
+ if not migration_ref:
+ raise exception.NotFound(_("No finished migrations found for "
+ "instance"))
+ instance_ref = self.db.instance_get(context, instance_id)
+ params = {'migration_id': migration_ref['id']}
+ self._cast_compute_message('confirm_resize', context, instance_id,
+ migration_ref['source_compute'], params=params)
+
+ self.db.migration_update(context, migration_ref['id'],
+ {'status': 'confirmed'})
+ self.db.instance_update(context, instance_id,
+ {'host': migration_ref['dest_compute'], })
+
+ def resize(self, context, instance_id, flavor_id):
+ """Resize a running instance."""
+ instance = self.db.instance_get(context, instance_id)
+ current_instance_type = self.db.instance_type_get_by_name(
+ context, instance['instance_type'])
+
+        new_instance_type = self.db.instance_type_get_by_flavor_id(
+                context, flavor_id)
+        if not new_instance_type:
+            raise exception.ApiError(_("Requested flavor %(flavor_id)d "
+                    "does not exist") % locals())
+        current_instance_type_name = current_instance_type['name']
+        new_instance_type_name = new_instance_type['name']
+        LOG.debug(_("Old instance type %(current_instance_type_name)s, "
+                "new instance type %(new_instance_type_name)s") % locals())
+
+ current_memory_mb = current_instance_type['memory_mb']
+ new_memory_mb = new_instance_type['memory_mb']
+ if current_memory_mb > new_memory_mb:
+ raise exception.ApiError(_("Invalid flavor: cannot downsize"
+ "instances"))
+ if current_memory_mb == new_memory_mb:
+ raise exception.ApiError(_("Invalid flavor: cannot use"
+ "the same flavor. "))
+
+ self._cast_scheduler_message(context,
+ {"method": "prep_resize",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id,
+ "flavor_id": flavor_id}})
+
+ @scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
"""Pause the given instance."""
self._cast_compute_message('pause_instance', context, instance_id)
+ @scheduler_api.reroute_compute("unpause")
def unpause(self, context, instance_id):
"""Unpause the given instance."""
self._cast_compute_message('unpause_instance', context, instance_id)
+ @scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
return self._call_compute_message(
@@ -438,25 +571,30 @@ class API(base.Base):
"""Retrieve actions for the given instance."""
return self.db.instance_get_actions(context, instance_id)
+ @scheduler_api.reroute_compute("suspend")
def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
self._cast_compute_message('suspend_instance', context, instance_id)
+ @scheduler_api.reroute_compute("resume")
def resume(self, context, instance_id):
"""resume the instance with instance_id"""
self._cast_compute_message('resume_instance', context, instance_id)
+ @scheduler_api.reroute_compute("rescue")
def rescue(self, context, instance_id):
"""Rescue the given instance."""
self._cast_compute_message('rescue_instance', context, instance_id)
+ @scheduler_api.reroute_compute("unrescue")
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
self._cast_compute_message('unrescue_instance', context, instance_id)
- def set_admin_password(self, context, instance_id):
+ def set_admin_password(self, context, instance_id, password=None):
"""Set the root/admin password for the given instance."""
- self._cast_compute_message('set_admin_password', context, instance_id)
+ self._cast_compute_message('set_admin_password', context, instance_id,
+ password)
def inject_file(self, context, instance_id):
"""Write a file to the given instance."""
@@ -464,7 +602,6 @@ class API(base.Base):
def get_ajax_console(self, context, instance_id):
"""Get a url to an AJAX Console"""
- instance = self.get(context, instance_id)
output = self._call_compute_message('get_ajax_console',
context,
instance_id)
@@ -475,6 +612,25 @@ class API(base.Base):
return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url,
output['token'])}
+ def get_vnc_console(self, context, instance_id):
+ """Get a url to a VNC Console."""
+ instance = self.get(context, instance_id)
+ output = self._call_compute_message('get_vnc_console',
+ context,
+ instance_id)
+ rpc.call(context, '%s' % FLAGS.vncproxy_topic,
+ {'method': 'authorize_vnc_console',
+ 'args': {'token': output['token'],
+ 'host': output['host'],
+ 'port': output['port']}})
+
+        # hostignore and portignore are compatibility params for noVNC
+ return {'url': '%s/vnc_auto.html?token=%s&host=%s&port=%s' % (
+ FLAGS.vncproxy_url,
+ output['token'],
+ 'hostignore',
+ 'portignore')}
+
def get_console_output(self, context, instance_id):
"""Get console output for an an instance"""
return self._call_compute_message('get_console_output',
@@ -512,7 +668,7 @@ class API(base.Base):
if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError(_("Invalid device specified: %s. "
"Example device: /dev/vdb") % device)
- self.volume_api.check_attach(context, volume_id)
+ self.volume_api.check_attach(context, volume_id=volume_id)
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
@@ -526,7 +682,7 @@ class API(base.Base):
instance = self.db.volume_get_instance(context.elevated(), volume_id)
if not instance:
raise exception.ApiError(_("Volume isn't attached to anything!"))
- self.volume_api.check_detach(context, volume_id)
+ self.volume_api.check_detach(context, volume_id=volume_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
@@ -537,5 +693,21 @@ class API(base.Base):
def associate_floating_ip(self, context, instance_id, address):
instance = self.get(context, instance_id)
- self.network_api.associate_floating_ip(context, address,
- instance['fixed_ip'])
+ self.network_api.associate_floating_ip(context,
+ floating_ip=address,
+ fixed_ip=instance['fixed_ip'])
+
+ def get_instance_metadata(self, context, instance_id):
+ """Get all metadata associated with an instance."""
+ rv = self.db.instance_metadata_get(context, instance_id)
+ return dict(rv.iteritems())
+
+ def delete_instance_metadata(self, context, instance_id, key):
+ """Delete the given metadata item"""
+ self.db.instance_metadata_delete(context, instance_id, key)
+
+ def update_or_create_instance_metadata(self, context, instance_id,
+ metadata):
+ """Updates or creates instance metadata"""
+ self.db.instance_metadata_update_or_create(context, instance_id,
+ metadata)
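
A rough sketch of the injected-file quota policy introduced above; the limit numbers and the function name here are hypothetical (the real values come from nova.quota), and ValueError stands in for quota.QuotaError.

    def check_injected_file_quota(injected_files, file_limit=5,
                                  path_limit=255, content_limit=10 * 1024):
        """Raise if the count, path length, or size of injected files is too big."""
        if injected_files is None:
            return
        if len(injected_files) > file_limit:
            raise ValueError("OnsetFileLimitExceeded")
        for path, content in injected_files:
            if len(path) > path_limit:
                raise ValueError("OnsetFilePathLimitExceeded")
            if len(content) > content_limit:
                raise ValueError("OnsetFileContentLimitExceeded")

    # check_injected_file_quota([('/etc/motd', 'hello')])  # passes silently
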
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 309313fd0..fa02a5dfa 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -4,6 +4,7 @@
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -21,30 +22,120 @@
The built-in instance properties.
"""
-from nova import flags
+from nova import context
+from nova import db
from nova import exception
+from nova import flags
+from nova import log as logging
FLAGS = flags.FLAGS
-INSTANCE_TYPES = {
- 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
- 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
- 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
- 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
- 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+LOG = logging.getLogger('nova.instance_types')
+
+
+def create(name, memory, vcpus, local_gb, flavorid, swap=0,
+ rxtx_quota=0, rxtx_cap=0):
+ """Creates instance types / flavors
+ arguments: name memory vcpus local_gb flavorid swap rxtx_quota rxtx_cap
+ """
+ for option in [memory, vcpus, local_gb, flavorid]:
+ try:
+ int(option)
+ except ValueError:
+ raise exception.InvalidInputException(
+ _("create arguments must be positive integers"))
+ if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0):
+ raise exception.InvalidInputException(
+ _("create arguments must be positive integers"))
+
+ try:
+ db.instance_type_create(
+ context.get_admin_context(),
+ dict(name=name,
+ memory_mb=memory,
+ vcpus=vcpus,
+ local_gb=local_gb,
+ flavorid=flavorid,
+ swap=swap,
+ rxtx_quota=rxtx_quota,
+ rxtx_cap=rxtx_cap))
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Cannot create instance type: %s" % name))
+
+
+def destroy(name):
+ """Marks instance types / flavors as deleted
+ arguments: name"""
+ if name == None:
+ raise exception.InvalidInputException(_("No instance type specified"))
+ else:
+ try:
+ db.instance_type_destroy(context.get_admin_context(), name)
+ except exception.NotFound:
+ LOG.exception(_('Instance type %s not found for deletion' % name))
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
+
+
+def purge(name):
+ """Removes instance types / flavors from database
+ arguments: name"""
+ if name == None:
+ raise exception.InvalidInputException(_("No instance type specified"))
+ else:
+ try:
+ db.instance_type_purge(context.get_admin_context(), name)
+ except exception.NotFound:
+ LOG.exception(_('Instance type %s not found for purge' % name))
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
+
+
+def get_all_types(inactive=0):
+ """Retrieves non-deleted instance_types.
+ Pass true as argument if you want deleted instance types returned also."""
+ return db.instance_type_get_all(context.get_admin_context(), inactive)
+
+
+def get_all_flavors(inactive=0):
+    """Retrieves non-deleted flavors; alias for get_all_types().
+    Pass true as argument if you want deleted instance types returned also."""
+    return get_all_types(inactive)
+
+
+def get_instance_type(name):
+ """Retrieves single instance type by name"""
+ if name is None:
+ return FLAGS.default_instance_type
+ try:
+ ctxt = context.get_admin_context()
+ inst_type = db.instance_type_get_by_name(ctxt, name)
+ return inst_type
+ except exception.DBError:
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
def get_by_type(instance_type):
- """Build instance data structure and save it to the data store."""
+ """retrieve instance type name"""
if instance_type is None:
return FLAGS.default_instance_type
- if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError(_("Unknown instance type: %s") % \
- instance_type, "Invalid")
- return instance_type
+
+ try:
+ ctxt = context.get_admin_context()
+ inst_type = db.instance_type_get_by_name(ctxt, instance_type)
+ return inst_type['name']
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Unknown instance type: %s" %\
+ instance_type))
def get_by_flavor_id(flavor_id):
- for instance_type, details in INSTANCE_TYPES.iteritems():
- if details['flavorid'] == flavor_id:
- return instance_type
- return FLAGS.default_instance_type
+ """retrieve instance type's name by flavor_id"""
+ if flavor_id is None:
+ return FLAGS.default_instance_type
+ try:
+ ctxt = context.get_admin_context()
+ flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id)
+ return flavor['name']
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Unknown flavor: %s" % flavor_id))
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d659712ad..08b772517 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -34,13 +35,17 @@ terminating it.
:func:`nova.utils.import_object`
"""
-import base64
import datetime
+import os
import random
import string
import socket
+import sys
+import tempfile
import functools
+from eventlet import greenthread
+
from nova import exception
from nova import flags
from nova import log as logging
@@ -48,6 +53,7 @@ from nova import manager
from nova import rpc
from nova import utils
from nova.compute import power_state
+from nova.virt import driver
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
@@ -61,6 +67,12 @@ flags.DEFINE_integer('password_length', 12,
flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on'
'this host.')
+flags.DEFINE_integer('live_migration_retry_count', 30,
+ "Retry count needed in live_migration."
+ " sleep 1 sec for each count")
+flags.DEFINE_integer("rescue_timeout", 0,
+ "Automatically unrescue an instance after N seconds."
+ " Set to 0 to disable.")
LOG = logging.getLogger('nova.compute.manager')
@@ -99,7 +111,7 @@ def checks_instance_lock(function):
return decorated_function
-class ComputeManager(manager.Manager):
+class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
@@ -109,10 +121,19 @@ class ComputeManager(manager.Manager):
# and redocument the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
- self.driver = utils.import_object(compute_driver)
+
+ try:
+ self.driver = utils.check_isinstance(
+ utils.import_object(compute_driver),
+ driver.ComputeDriver)
+ except ImportError as e:
+ LOG.error(_("Unable to load the virtualization driver: %s") % (e))
+ sys.exit(1)
+
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
- super(ComputeManager, self).__init__(*args, **kwargs)
+ super(ComputeManager, self).__init__(service_name="compute",
+ *args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -174,14 +195,14 @@ class ComputeManager(manager.Manager):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- instance_ref.onset_files = kwargs.get('onset_files', [])
+ instance_ref.injected_files = kwargs.get('injected_files', [])
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
self.db.instance_update(context,
instance_id,
- {'host': self.host})
+ {'host': self.host, 'launched_on': self.host})
self.db.instance_set_state(context,
instance_id,
@@ -215,9 +236,10 @@ class ComputeManager(manager.Manager):
self.db.instance_update(context,
instance_id,
{'launched_at': now})
- except Exception: # pylint: disable-msg=W0702
- LOG.exception(_("instance %s: Failed to spawn"), instance_id,
- context=context)
+ except Exception: # pylint: disable=W0702
+ LOG.exception(_("Instance '%s' failed to spawn. Is virtualization"
+ " enabled in the BIOS?"), instance_id,
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.SHUTDOWN)
@@ -353,15 +375,10 @@ class ComputeManager(manager.Manager):
LOG.warn(_('trying to inject a file into a non-running '
'instance: %(instance_id)s (state: %(instance_state)s '
'expected: %(expected_state)s)') % locals())
- # Files/paths *should* be base64-encoded at this point, but
- # double-check to make sure.
- b64_path = utils.ensure_b64_encoding(path)
- b64_contents = utils.ensure_b64_encoding(file_contents)
- plain_path = base64.b64decode(b64_path)
nm = instance_ref['name']
- msg = _('instance %(nm)s: injecting file to %(plain_path)s') % locals()
+ msg = _('instance %(nm)s: injecting file to %(path)s') % locals()
LOG.audit(msg)
- self.driver.inject_file(instance_ref, b64_path, b64_contents)
+ self.driver.inject_file(instance_ref, path, file_contents)
@exception.wrap_exception
@checks_instance_lock
@@ -370,12 +387,19 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rescuing')
self.network_manager.setup_compute_network(context, instance_id)
- self.driver.rescue(instance_ref)
+ self.driver.rescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -385,11 +409,18 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- self.driver.unrescue(instance_ref)
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unrescuing')
+ self.driver.unrescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@staticmethod
@@ -399,6 +430,141 @@ class ComputeManager(manager.Manager):
@exception.wrap_exception
@checks_instance_lock
+ def confirm_resize(self, context, instance_id, migration_id):
+ """Destroys the source instance"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+ self.driver.destroy(instance_ref)
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def revert_resize(self, context, instance_id, migration_id):
+ """Destroys the new instance on the destination machine,
+ reverts the model changes, and powers on the old
+ instance on the source machine"""
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+
+ self.driver.destroy(instance_ref)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ rpc.cast(context, topic,
+ {'method': 'finish_revert_resize',
+ 'args': {
+ 'migration_id': migration_ref['id'],
+ 'instance_id': instance_id, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def finish_revert_resize(self, context, instance_id, migration_id):
+ """Finishes the second half of reverting a resize, powering back on
+ the source instance and reverting the resized attributes in the
+ database"""
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ migration_ref['old_flavor_id'])
+
+ # Just roll back the record. There's no need to resize down since
+ # the 'old' VM already has the preferred attributes
+ self.db.instance_update(context, instance_id,
+ dict(memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb']))
+
+ self.driver.revert_resize(instance_ref)
+ self.db.migration_update(context, migration_id,
+ {'status': 'reverted'})
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def prep_resize(self, context, instance_id, flavor_id):
+ """Initiates the process of moving a running instance to another
+ host, possibly changing the RAM and disk size in the process"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ if instance_ref['host'] == FLAGS.host:
+ raise exception.Error(_(
+ 'Migration error: destination same as source!'))
+
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ flavor_id)
+ migration_ref = self.db.migration_create(context,
+ {'instance_id': instance_id,
+ 'source_compute': instance_ref['host'],
+ 'dest_compute': FLAGS.host,
+ 'dest_host': self.driver.get_host_ip_addr(),
+ 'old_flavor_id': instance_type['flavorid'],
+ 'new_flavor_id': flavor_id,
+ 'status': 'pre-migrating'})
+
+ LOG.audit(_('instance %s: migrating to '), instance_id,
+ context=context)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ rpc.cast(context, topic,
+ {'method': 'resize_instance',
+ 'args': {
+ 'migration_id': migration_ref['id'],
+ 'instance_id': instance_id, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def resize_instance(self, context, instance_id, migration_id):
+ """Starts the migration of a running instance to another host"""
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+ self.db.migration_update(context, migration_id,
+ {'status': 'migrating', })
+
+ disk_info = self.driver.migrate_disk_and_power_off(instance_ref,
+ migration_ref['dest_host'])
+ self.db.migration_update(context, migration_id,
+ {'status': 'post-migrating', })
+
+ service = self.db.service_get_by_host_and_topic(context,
+ migration_ref['dest_compute'], FLAGS.compute_topic)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ migration_ref['dest_compute'])
+ rpc.cast(context, topic,
+ {'method': 'finish_resize',
+ 'args': {
+ 'migration_id': migration_id,
+ 'instance_id': instance_id,
+ 'disk_info': disk_info, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def finish_resize(self, context, instance_id, migration_id, disk_info):
+ """Completes the migration process by setting up the newly transferred
+ disk and turning on the instance on its new host machine"""
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get(context,
+ migration_ref['instance_id'])
+        # TODO(mdietz): apply the rest of the instance_type attributes
+        # once they're supported
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ migration_ref['new_flavor_id'])
+ self.db.instance_update(context, instance_id,
+ dict(instance_type=instance_type['name'],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb']))
+
+ # reload the updated instance ref
+ # FIXME(mdietz): is there reload functionality?
+ instance_ref = self.db.instance_get(context, instance_id)
+ self.driver.finish_resize(instance_ref, disk_info)
+
+ self.db.migration_update(context, migration_id,
+ {'status': 'finished', })
+
+ @exception.wrap_exception
+ @checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this server."""
context = context.elevated()
@@ -557,6 +723,15 @@ class ComputeManager(manager.Manager):
return self.driver.get_ajax_console(instance_ref)
+ @exception.wrap_exception
+ def get_vnc_console(self, context, instance_id):
+ """Return connection information for an vnc console."""
+ context = context.elevated()
+ LOG.debug(_("instance %s: getting vnc console"), instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ return self.driver.get_vnc_console(instance_ref)
+
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
@@ -574,7 +749,7 @@ class ComputeManager(manager.Manager):
volume_id,
instance_id,
mountpoint)
- except Exception as exc: # pylint: disable-msg=W0702
+ except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
#             exception below.
@@ -605,3 +780,333 @@ class ComputeManager(manager.Manager):
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
return True
+
+ @exception.wrap_exception
+ def compare_cpu(self, context, cpu_info):
+ """Checks the host cpu is compatible to a cpu given by xml.
+
+ :param context: security context
+ :param cpu_info: json string obtained from virConnect.getCapabilities
+ :returns: See driver.compare_cpu
+
+ """
+ return self.driver.compare_cpu(cpu_info)
+
+ @exception.wrap_exception
+ def create_shared_storage_test_file(self, context):
+ """Makes tmpfile under FLAGS.instance_path.
+
+ This method enables compute nodes to recognize that they mounts
+ same shared storage. (create|check|creanup)_shared_storage_test_file()
+ is a pair.
+
+ :param context: security context
+ :returns: tmpfile name(basename)
+
+ """
+
+ dirpath = FLAGS.instances_path
+ fd, tmp_file = tempfile.mkstemp(dir=dirpath)
+ LOG.debug(_("Creating tmpfile %s to notify to other "
+ "compute nodes that they should mount "
+ "the same storage.") % tmp_file)
+ os.close(fd)
+ return os.path.basename(tmp_file)
+
+ @exception.wrap_exception
+ def check_shared_storage_test_file(self, context, filename):
+ """Confirms existence of the tmpfile under FLAGS.instances_path.
+
+ :param context: security context
+ :param filename: confirm existence of FLAGS.instances_path/thisfile
+
+ """
+
+ tmp_file = os.path.join(FLAGS.instances_path, filename)
+ if not os.path.exists(tmp_file):
+ raise exception.NotFound(_('%s not found') % tmp_file)
+
+ @exception.wrap_exception
+ def cleanup_shared_storage_test_file(self, context, filename):
+ """Removes existence of the tmpfile under FLAGS.instances_path.
+
+ :param context: security context
+ :param filename: remove existence of FLAGS.instances_path/thisfile
+
+ """
+
+ tmp_file = os.path.join(FLAGS.instances_path, filename)
+ os.remove(tmp_file)
+
+ @exception.wrap_exception
+ def update_available_resource(self, context):
+ """See comments update_resource_info.
+
+ :param context: security context
+ :returns: See driver.update_available_resource()
+
+ """
+
+ return self.driver.update_available_resource(context, self.host)
+
+ def pre_live_migration(self, context, instance_id, time=None):
+ """Preparations for live migration at dest host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+
+ """
+
+ if not time:
+ time = greenthread
+
+ # Getting instance info
+ instance_ref = self.db.instance_get(context, instance_id)
+ ec2_id = instance_ref['hostname']
+
+ # Getting fixed ips
+ fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
+ if not fixed_ip:
+ msg = _("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.")
+ raise exception.NotFound(msg % locals())
+
+ # If any volume is mounted, prepare here.
+ if not instance_ref['volumes']:
+ LOG.info(_("%s has no volume."), ec2_id)
+ else:
+ for v in instance_ref['volumes']:
+ self.volume_manager.setup_compute_volume(context, v['id'])
+
+ # Bridge settings.
+        # Call this method prior to ensure_filtering_rules_for_instance;
+        # if the bridge is not set up yet,
+        # ensure_filtering_rules_for_instance fails.
+        #
+        # The retry is necessary because requests arrive continuously and
+        # concurrent calls to iptables make it complain.
+ max_retry = FLAGS.live_migration_retry_count
+ for cnt in range(max_retry):
+ try:
+ self.network_manager.setup_compute_network(context,
+ instance_id)
+ break
+ except exception.ProcessExecutionError:
+ if cnt == max_retry - 1:
+ raise
+ else:
+ LOG.warn(_("setup_compute_network() failed %(cnt)d."
+ "Retry up to %(max_retry)d for %(ec2_id)s.")
+ % locals())
+ time.sleep(1)
+
+        # Creating filters for hypervisors and firewalls.
+        # An example is nova-instance-instance-xxx,
+        # which is written to libvirt.xml (check "virsh nwfilter-list").
+        # This nwfilter is necessary on the destination host.
+        # In addition, this method creates the filtering rules
+        # on the destination host.
+ self.driver.ensure_filtering_rules_for_instance(instance_ref)
+
+ def live_migration(self, context, instance_id, dest):
+ """Executing live migration.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param dest: destination host
+
+ """
+
+ # Get instance for error handling.
+ instance_ref = self.db.instance_get(context, instance_id)
+ i_name = instance_ref.name
+
+ try:
+            # Check that the volume node is working correctly when any
+            # volumes are attached to the instance.
+ if instance_ref['volumes']:
+ rpc.call(context,
+ FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': instance_id}})
+
+            # Ask the dest host to prepare for live migration.
+ rpc.call(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, dest),
+ {"method": "pre_live_migration",
+ "args": {'instance_id': instance_id}})
+
+ except Exception:
+ msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
+ LOG.error(msg % locals())
+ self.recover_live_migration(context, instance_ref)
+ raise
+
+ # Executing live migration
+        # live_migration might raise exceptions, but
+        # nothing needs to be recovered in this version.
+ self.driver.live_migration(context, instance_ref, dest,
+ self.post_live_migration,
+ self.recover_live_migration)
+
+ def post_live_migration(self, ctxt, instance_ref, dest):
+ """Post operations for live migration.
+
+ This method is called from live_migration
+        and mainly updates database records.
+
+ :param ctxt: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ LOG.info(_('post_live_migration() is started..'))
+ instance_id = instance_ref['id']
+
+ # Detaching volumes.
+ try:
+ for vol in self.db.volume_get_all_by_instance(ctxt, instance_id):
+ self.volume_manager.remove_compute_volume(ctxt, vol['id'])
+ except exception.NotFound:
+ pass
+
+ # Releasing vlan.
+ # (not necessary in current implementation?)
+
+ # Releasing security group ingress rule.
+ self.driver.unfilter_instance(instance_ref)
+
+ # Database updating.
+ i_name = instance_ref.name
+ try:
+            # Do not return if floating_ip is not found; otherwise the
+            # instance will never be accessible.
+ floating_ip = self.db.instance_get_floating_address(ctxt,
+ instance_id)
+ if not floating_ip:
+ LOG.info(_('No floating_ip is found for %s.'), i_name)
+ else:
+ floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
+ floating_ip)
+ self.db.floating_ip_update(ctxt,
+ floating_ip_ref['address'],
+ {'host': dest})
+ except exception.NotFound:
+ LOG.info(_('No floating_ip is found for %s.'), i_name)
+ except:
+ LOG.error(_("Live migration: Unexpected error:"
+ "%s cannot inherit floating ip..") % i_name)
+
+ # Restore instance/volume state
+ self.recover_live_migration(ctxt, instance_ref, dest)
+
+ LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
+ % locals())
+ LOG.info(_("You may see the error \"libvirt: QEMU error: "
+ "Domain not found: no domain with matching name.\" "
+ "This error can be safely ignored."))
+
+ def recover_live_migration(self, ctxt, instance_ref, host=None):
+ """Recovers Instance/volume state from migrating -> running.
+
+ :param ctxt: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param host:
+            DB column value is updated to this hostname.
+            If None, the host the instance is currently running on is used.
+
+ """
+
+ if not host:
+ host = instance_ref['host']
+
+ self.db.instance_update(ctxt,
+ instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': host})
+
+ for volume in instance_ref['volumes']:
+ self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'})
+
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+ error_list = super(ComputeManager, self).periodic_tasks(context)
+ if error_list is None:
+ error_list = []
+
+ try:
+ if FLAGS.rescue_timeout > 0:
+ self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+ except Exception as ex:
+ LOG.warning(_("Error during poll_rescued_instances: %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ try:
+ self._poll_instance_states(context)
+ except Exception as ex:
+ LOG.warning(_("Error during instance poll: %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ return error_list
+
+ def _poll_instance_states(self, context):
+ vm_instances = self.driver.list_instances_detail()
+ vm_instances = dict((vm.name, vm) for vm in vm_instances)
+
+ # Keep a list of VMs not in the DB, cross them off as we find them
+ vms_not_found_in_db = list(vm_instances.keys())
+
+ db_instances = self.db.instance_get_all_by_host(context, self.host)
+
+ for db_instance in db_instances:
+ name = db_instance['name']
+ db_state = db_instance['state']
+ vm_instance = vm_instances.get(name)
+
+ if vm_instance is None:
+ # NOTE(justinsb): We have to be very careful here, because a
+ # concurrent operation could be in progress (e.g. a spawn)
+ if db_state == power_state.NOSTATE:
+ # Assume that NOSTATE => spawning
+ # TODO(justinsb): This does mean that if we crash during a
+ # spawn, the machine will never leave the spawning state,
+ # but this is just the way nova is; this function isn't
+ # trying to correct that problem.
+ # We could have a separate task to correct this error.
+ # TODO(justinsb): What happens during a live migration?
+ LOG.info(_("Found instance '%(name)s' in DB but no VM. "
+ "State=%(db_state)s, so assuming spawn is in "
+ "progress.") % locals())
+ vm_state = db_state
+ else:
+ LOG.info(_("Found instance '%(name)s' in DB but no VM. "
+ "State=%(db_state)s, so setting state to "
+ "shutoff.") % locals())
+ vm_state = power_state.SHUTOFF
+ else:
+ vm_state = vm_instance.state
+ vms_not_found_in_db.remove(name)
+
+ if vm_state != db_state:
+ LOG.info(_("DB/VM state mismatch. Changing state from "
+ "'%(db_state)s' to '%(vm_state)s'") % locals())
+ self.db.instance_set_state(context,
+ db_instance['id'],
+ vm_state)
+
+ if vm_state == power_state.SHUTOFF:
+ # TODO(soren): This is what the compute manager does when you
+ # terminate an instance. At some point I figure we'll have a
+ # "terminated" state and some sort of cleanup job that runs
+ # occasionally, cleaning them out.
+ self.db.instance_destroy(context, db_instance['id'])
+
+ # Are there VMs not in the DB?
+ for vm_not_found_in_db in vms_not_found_in_db:
+ name = vm_not_found_in_db
+ # TODO(justinsb): What to do here? Adopt it? Shut it down?
+ LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
+ % locals())
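
The state reconciliation in the new _poll_instance_states() can be condensed to the comparison below; the power-state constants and the dict inputs (DB name->state, hypervisor name->state) are simplified stand-ins for what the manager reads from the database and the driver.

    NOSTATE, RUNNING, SHUTOFF = 0x00, 0x01, 0x04

    def reconcile_states(db_states, vm_states):
        """Return {name: new_state} for instances whose DB state is stale."""
        updates = {}
        for name, db_state in db_states.items():
            if name in vm_states:
                vm_state = vm_states[name]
            elif db_state == NOSTATE:
                # No VM yet and DB says NOSTATE: assume a spawn is in progress.
                vm_state = db_state
            else:
                # VM is gone: treat the instance as shut off.
                vm_state = SHUTOFF
            if vm_state != db_state:
                updates[name] = vm_state
        return updates

    # reconcile_states({'instance-1': RUNNING}, {}) => {'instance-1': SHUTOFF}
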
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index adfc2dff0..ef013b2ef 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
@@ -19,6 +20,7 @@
"""The various power states that a VM can be in."""
+# NOTE(justinsb): These are the virDomainState values from libvirt
NOSTATE = 0x00
RUNNING = 0x01
BLOCKED = 0x02
@@ -29,9 +31,10 @@ CRASHED = 0x06
SUSPENDED = 0x07
FAILED = 0x08
-
-def name(code):
- d = {
+# TODO(justinsb): Power state really needs to be a proper class,
+# so that we're not locked into the libvirt status codes and can put mapping
+# logic here rather than spread throughout the code
+_STATE_MAP = {
NOSTATE: 'pending',
RUNNING: 'running',
BLOCKED: 'blocked',
@@ -41,4 +44,11 @@ def name(code):
CRASHED: 'crashed',
SUSPENDED: 'suspended',
FAILED: 'failed to spawn'}
- return d[code]
+
+
+def name(code):
+ return _STATE_MAP[code]
+
+
+def valid_states():
+ return _STATE_MAP.keys()
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 57c75cf4f..bfa571ea9 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -69,7 +69,7 @@ class ConsoleProxyManager(manager.Manager):
except exception.NotFound:
logging.debug(_("Adding console"))
if not password:
- password = self.driver.generate_password()
+ password = utils.generate_password(8)
if not port:
port = self.driver.get_port(context)
console_data = {'instance_name': name,
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
new file mode 100644
index 000000000..521da289f
--- /dev/null
+++ b/nova/console/vmrc.py
@@ -0,0 +1,144 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VMRC console drivers.
+"""
+
+import base64
+import json
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.virt.vmwareapi import vim_util
+
+flags.DEFINE_integer('console_vmrc_port',
+ 443,
+ "port for VMware VMRC connections")
+flags.DEFINE_integer('console_vmrc_error_retries',
+ 10,
+ "number of retries for retrieving VMRC information")
+
+FLAGS = flags.FLAGS
+
+
+class VMRCConsole(object):
+ """VMRC console driver with ESX credentials."""
+
+ def __init__(self):
+ super(VMRCConsole, self).__init__()
+
+ @property
+ def console_type(self):
+ return 'vmrc+credentials'
+
+ def get_port(self, context):
+ """Get available port for consoles."""
+ return FLAGS.console_vmrc_port
+
+ def setup_console(self, context, console):
+ """Sets up console."""
+ pass
+
+ def teardown_console(self, context, console):
+ """Tears down console."""
+ pass
+
+ def init_host(self):
+ """Perform console initialization."""
+ pass
+
+ def fix_pool_password(self, password):
+ """Encode password."""
+ # TODO(sateesh): Encrypt pool password
+ return password
+
+ def generate_password(self, vim_session, pool, instance_name):
+ """
+ Returns VMRC Connection credentials.
+
+ Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
+ """
+ username, password = pool['username'], pool['password']
+ vms = vim_session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name", "config.files.vmPathName"])
+ vm_ds_path_name = None
+ vm_ref = None
+ for vm in vms:
+ vm_name = None
+ ds_path_name = None
+ for prop in vm.propSet:
+ if prop.name == "name":
+ vm_name = prop.val
+ elif prop.name == "config.files.vmPathName":
+ ds_path_name = prop.val
+ if vm_name == instance_name:
+ vm_ref = vm.obj
+ vm_ds_path_name = ds_path_name
+ break
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance_name)
+ json_data = json.dumps({"vm_id": vm_ds_path_name,
+ "username": username,
+ "password": password})
+ return base64.b64encode(json_data)
+
+ def is_otp(self):
+ """Is one time password or not."""
+ return False
+
+
+class VMRCSessionConsole(VMRCConsole):
+ """VMRC console driver with VMRC One Time Sessions."""
+
+ def __init__(self):
+ super(VMRCSessionConsole, self).__init__()
+
+ @property
+ def console_type(self):
+ return 'vmrc+session'
+
+ def generate_password(self, vim_session, pool, instance_name):
+ """
+ Returns a VMRC Session.
+
+ Return string is of the form '<VM MOID>:<VMRC Ticket>'.
+ """
+ vms = vim_session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ vm_ref = None
+ for vm in vms:
+ if vm.propSet[0].val == instance_name:
+ vm_ref = vm.obj
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance_name)
+ virtual_machine_ticket = \
+ vim_session._call_method(
+ vim_session._get_vim(),
+ "AcquireCloneTicket",
+ vim_session._get_vim().get_service_content().sessionManager)
+ json_data = json.dumps({"vm_id": str(vm_ref.value),
+ "username": virtual_machine_ticket,
+ "password": virtual_machine_ticket})
+ return base64.b64encode(json_data)
+
+ def is_otp(self):
+ """Is one time password or not."""
+ return True
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
new file mode 100644
index 000000000..09beac7a0
--- /dev/null
+++ b/nova/console/vmrc_manager.py
@@ -0,0 +1,158 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VMRC Console Manager.
+"""
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import manager
+from nova import rpc
+from nova import utils
+from nova.virt.vmwareapi_conn import VMWareAPISession
+
+LOG = logging.getLogger("nova.console.vmrc_manager")
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('console_public_hostname',
+ '',
+ 'Publicly visible name for this console host')
+flags.DEFINE_string('console_driver',
+ 'nova.console.vmrc.VMRCConsole',
+ 'Driver to use for the console')
+
+
+class ConsoleVMRCManager(manager.Manager):
+
+ """
+ Manager to handle VMRC connections needed for accessing instance consoles.
+ """
+
+ def __init__(self, console_driver=None, *args, **kwargs):
+ self.driver = utils.import_object(FLAGS.console_driver)
+ super(ConsoleVMRCManager, self).__init__(*args, **kwargs)
+
+ def init_host(self):
+ self.sessions = {}
+ self.driver.init_host()
+
+ def _get_vim_session(self, pool):
+ """Get VIM session for the pool specified."""
+ vim_session = None
+ if pool['id'] not in self.sessions.keys():
+ vim_session = VMWareAPISession(pool['address'],
+ pool['username'],
+ pool['password'],
+ FLAGS.console_vmrc_error_retries)
+ self.sessions[pool['id']] = vim_session
+ return self.sessions[pool['id']]
+
+ def _generate_console(self, context, pool, name, instance_id, instance):
+ """Sets up console for the instance."""
+ LOG.debug(_("Adding console"))
+
+ password = self.driver.generate_password(
+ self._get_vim_session(pool),
+ pool,
+ instance.name)
+
+ console_data = {'instance_name': name,
+ 'instance_id': instance_id,
+ 'password': password,
+ 'pool_id': pool['id']}
+ console_data['port'] = self.driver.get_port(context)
+ console = self.db.console_create(context, console_data)
+ self.driver.setup_console(context, console)
+ return console
+
+ @exception.wrap_exception
+ def add_console(self, context, instance_id, password=None,
+ port=None, **kwargs):
+ """
+        Adds a console for the instance. If the driver uses one-time
+        passwords, we generate new console credentials.
+ """
+ instance = self.db.instance_get(context, instance_id)
+ host = instance['host']
+ name = instance['name']
+ pool = self.get_pool_for_instance_host(context, host)
+ try:
+ console = self.db.console_get_by_pool_instance(context,
+ pool['id'],
+ instance_id)
+ if self.driver.is_otp():
+ console = self._generate_console(
+ context,
+ pool,
+ name,
+ instance_id,
+ instance)
+ except exception.NotFound:
+ console = self._generate_console(
+ context,
+ pool,
+ name,
+ instance_id,
+ instance)
+ return console['id']
+
+ @exception.wrap_exception
+ def remove_console(self, context, console_id, **_kwargs):
+ """Removes a console entry."""
+ try:
+ console = self.db.console_get(context, console_id)
+ except exception.NotFound:
+ LOG.debug(_("Tried to remove non-existent console "
+ "%(console_id)s.") %
+ {'console_id': console_id})
+ return
+ LOG.debug(_("Removing console "
+ "%(console_id)s.") %
+ {'console_id': console_id})
+ self.db.console_delete(context, console_id)
+ self.driver.teardown_console(context, console)
+
+ def get_pool_for_instance_host(self, context, instance_host):
+ """Gets console pool info for the instance."""
+ context = context.elevated()
+ console_type = self.driver.console_type
+ try:
+ pool = self.db.console_pool_get_by_host_type(context,
+ instance_host,
+ self.host,
+ console_type)
+ except exception.NotFound:
+ pool_info = rpc.call(context,
+ self.db.queue_get_for(context,
+ FLAGS.compute_topic,
+ instance_host),
+ {"method": "get_console_pool_info",
+ "args": {"console_type": console_type}})
+ pool_info['password'] = self.driver.fix_pool_password(
+ pool_info['password'])
+ pool_info['host'] = self.host
+ # ESX Address or Proxy Address
+ public_host_name = pool_info['address']
+ if FLAGS.console_public_hostname:
+ public_host_name = FLAGS.console_public_hostname
+ pool_info['public_hostname'] = public_host_name
+ pool_info['console_type'] = console_type
+ pool_info['compute_host'] = instance_host
+ pool = self.db.console_pool_create(context, pool_info)
+ return pool
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index cd257e0a6..0cedfbb13 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -91,10 +91,6 @@ class XVPConsoleProxy(object):
"""Trim password to length, and encode"""
return self._xvp_encrypt(password)
- def generate_password(self, length=8):
- """Returns random console password"""
- return os.urandom(length * 2).encode('base64')[:length]
-
def _rebuild_xvp_conf(self, context):
logging.debug(_("Rebuilding xvp conf"))
pools = [pool for pool in
@@ -133,10 +129,10 @@ class XVPConsoleProxy(object):
return
logging.debug(_("Starting xvp"))
try:
- utils.execute('xvp -p %s -c %s -l %s' %
- (FLAGS.console_xvp_pid,
- FLAGS.console_xvp_conf,
- FLAGS.console_xvp_log))
+ utils.execute('xvp',
+ '-p', FLAGS.console_xvp_pid,
+ '-c', FLAGS.console_xvp_conf,
+ '-l', FLAGS.console_xvp_log)
except exception.ProcessExecutionError, err:
logging.error(_("Error starting xvp: %s") % err)
@@ -190,5 +186,5 @@ class XVPConsoleProxy(object):
flag = '-x'
#xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
- out, err = utils.execute('xvp %s' % flag, process_input=password)
+ out, err = utils.execute('xvp', flag, process_input=password)
return out.strip()
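
The xvp.py hunk above (and the crypto.py hunk below) replace single shell-string commands with explicit argument lists, which sidesteps shell quoting and injection problems. A minimal illustration of the same idea using the standard library rather than nova's utils.execute; the wrapper name is hypothetical.

    import subprocess

    def run_xvp(pid_file, conf_file, log_file):
        """Start xvp with explicit argv items instead of one shell string."""
        # Each item is passed to the program as-is; no shell parsing happens.
        return subprocess.call(['xvp',
                                '-p', pid_file,
                                '-c', conf_file,
                                '-l', log_file])
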
diff --git a/nova/crypto.py b/nova/crypto.py
index a34b940f5..2b122e560 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -26,6 +26,7 @@ import gettext
import hashlib
import os
import shutil
+import string
import struct
import tempfile
import time
@@ -105,8 +106,10 @@ def generate_key_pair(bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.join(tmpdir, 'temp')
- utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
- (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
+ utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
+ '-f', keyfile)
+ (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f',
+ '%s.pub' % (keyfile))
fingerprint = out.split(' ')[1]
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
@@ -118,7 +121,8 @@ def generate_key_pair(bits=1024):
# bio = M2Crypto.BIO.MemoryBuffer()
# key.save_pub_key_bio(bio)
# public_key = bio.read()
- # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
+ # public_key, err = execute('ssh-keygen', '-y', '-f',
+ # '/dev/stdin', private_key)
return (private_key, public_key, fingerprint)
@@ -143,9 +147,10 @@ def revoke_cert(project_id, file_name):
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
- utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name)
- utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" %
- FLAGS.crl_file)
+ utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
+ file_name)
+ utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
+ '-out', FLAGS.crl_file)
os.chdir(start)
@@ -193,9 +198,9 @@ def generate_x509_cert(user_id, project_id, bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
- utils.execute("openssl genrsa -out %s %s" % (keyfile, bits))
- utils.execute("openssl req -new -key %s -out %s -batch -subj %s" %
- (keyfile, csrfile, subject))
+ utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
+ utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile,
+ '-batch', '-subj', subject)
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
@@ -210,10 +215,13 @@ def generate_x509_cert(user_id, project_id, bits=1024):
def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
+ geninter_sh_path = os.path.join(os.path.dirname(__file__),
+ 'CA',
+ 'geninter.sh')
start = os.getcwd()
os.chdir(ca_folder())
- utils.execute("sh geninter.sh %s %s" %
- (project_id, _project_cert_subject(project_id)))
+ utils.execute('sh', geninter_sh_path, project_id,
+ _project_cert_subject(project_id))
os.chdir(start)
@@ -222,14 +230,17 @@ def generate_vpn_files(project_id):
csr_fn = os.path.join(project_folder, "server.csr")
crt_fn = os.path.join(project_folder, "server.crt")
+    genvpn_sh_path = os.path.join(os.path.dirname(__file__),
+                                  'CA',
+                                  'genvpn.sh')
if os.path.exists(crt_fn):
return
_ensure_project_folder(project_id)
start = os.getcwd()
os.chdir(ca_folder())
# TODO(vish): the shell scripts could all be done in python
- utils.execute("sh genvpn.sh %s %s" %
- (project_id, _vpn_cert_subject(project_id)))
+ utils.execute('sh', genvpn_sh_path,
+ project_id, _vpn_cert_subject(project_id))
with open(csr_fn, "r") as csrfile:
csr_text = csrfile.read()
(serial, signed_csr) = sign_csr(csr_text, project_id)
@@ -259,10 +270,11 @@ def _sign_csr(csr_text, ca_folder):
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
- utils.execute("openssl ca -batch -out %s -config "
- "./openssl.cnf -infiles %s" % (outbound, inbound))
- out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound)
- serial = out.rpartition("=")[2]
+ utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
+ './openssl.cnf', '-infiles', inbound)
+ out, _err = utils.execute('openssl', 'x509', '-in', outbound,
+ '-serial', '-noout')
+ serial = string.strip(out.rpartition("=")[2])
os.chdir(start)
with open(outbound, "r") as crtfile:
return (serial, crtfile.read())
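
In _sign_csr above, the serial now comes from 'openssl x509 -serial -noout',
which prints a single line of the form serial=00AB12CD; rpartition plus strip
keeps just the hex value (string.strip(s) is equivalent to s.strip() here). A
tiny sketch of that parsing step with an assumed sample output:

    def parse_serial(output):
        # e.g. 'serial=00AB12CD\n' -> '00AB12CD'
        return output.rpartition('=')[2].strip()

    assert parse_serial('serial=00AB12CD\n') == '00AB12CD'
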
diff --git a/nova/db/api.py b/nova/db/api.py
index 0a010e727..fd3c63b76 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -71,6 +71,7 @@ class NoMoreTargets(exception.Error):
"""No more available blades"""
pass
+
###################
@@ -80,11 +81,16 @@ def service_destroy(context, instance_id):
def service_get(context, service_id):
- """Get an service or raise if it does not exist."""
+ """Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
-def service_get_all(context, disabled=False):
+def service_get_by_host_and_topic(context, host, topic):
+ """Get a service by host it's on and topic it listens to"""
+ return IMPL.service_get_by_host_and_topic(context, host, topic)
+
+
+def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
@@ -99,6 +105,11 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
+def service_get_all_compute_by_host(context, host):
+ """Get all compute services for a given host."""
+ return IMPL.service_get_all_compute_by_host(context, host)
+
+
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
@@ -148,6 +159,29 @@ def service_update(context, service_id, values):
###################
+def compute_node_get(context, compute_id, session=None):
+ """Get an computeNode or raise if it does not exist."""
+ return IMPL.compute_node_get(context, compute_id)
+
+
+def compute_node_create(context, values):
+ """Create a computeNode from the values dictionary."""
+ return IMPL.compute_node_create(context, values)
+
+
+def compute_node_update(context, compute_id, values):
+ """Set the given properties on an computeNode and update it.
+
+ Raises NotFound if computeNode does not exist.
+
+ """
+
+ return IMPL.compute_node_update(context, compute_id, values)
+
+
+###################
+
+
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
@@ -181,7 +215,7 @@ def certificate_update(context, certificate_id, values):
Raises NotFound if service does not exist.
"""
- return IMPL.service_update(context, certificate_id, values)
+ return IMPL.certificate_update(context, certificate_id, values)
###################
@@ -252,6 +286,33 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
+def floating_ip_update(context, address, values):
+ """Update a floating ip by address or raise if it doesn't exist."""
+ return IMPL.floating_ip_update(context, address, values)
+
+
+####################
+
+def migration_update(context, id, values):
+ """Update a migration instance"""
+ return IMPL.migration_update(context, id, values)
+
+
+def migration_create(context, values):
+ """Create a migration record"""
+ return IMPL.migration_create(context, values)
+
+
+def migration_get(context, migration_id):
+ """Finds a migration by the id"""
+ return IMPL.migration_get(context, migration_id)
+
+
+def migration_get_by_instance_and_status(context, instance_id, status):
+ """Finds a migration by the instance id its migrating"""
+ return IMPL.migration_get_by_instance_and_status(context, instance_id,
+ status)
+
####################
@@ -293,6 +354,11 @@ def fixed_ip_get_all(context):
return IMPL.fixed_ip_get_all(context)
+def fixed_ip_get_all_by_host(context, host):
+ """Get all defined fixed ips used by a host."""
+ return IMPL.fixed_ip_get_all_by_host(context, host)
+
+
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
@@ -414,6 +480,27 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ """Get instances.vcpus by host and project."""
+ return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ """Get amount of memory by host and project."""
+ return IMPL.instance_get_memory_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ """Get total amount of disk by host and project."""
+ return IMPL.instance_get_disk_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
return IMPL.instance_action_create(context, values)
@@ -490,6 +577,13 @@ def network_create_safe(context, values):
return IMPL.network_create_safe(context, values)
+def network_delete_safe(context, network_id):
+ """Delete network with key network_id.
+ This method assumes that the network is not associated with any project
+ """
+ return IMPL.network_delete_safe(context, network_id)
+
+
def network_create_fixed_ips(context, network_id, num_vpn_clients):
"""Create the ips for the network, reserving sepecified ips."""
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
@@ -515,7 +609,7 @@ def network_get_all(context):
return IMPL.network_get_all(context)
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id)
@@ -526,6 +620,11 @@ def network_get_by_bridge(context, bridge):
return IMPL.network_get_by_bridge(context, bridge)
+def network_get_by_cidr(context, cidr):
+ """Get a network by cidr or raise if it does not exist"""
+ return IMPL.network_get_by_cidr(context, cidr)
+
+
def network_get_by_instance(context, instance_id):
"""Get a network by instance id or raise if it does not exist."""
return IMPL.network_get_by_instance(context, instance_id)
@@ -630,19 +729,24 @@ def iscsi_target_create_safe(context, values):
###############
-def auth_destroy_token(context, token):
+def auth_token_destroy(context, token_id):
"""Destroy an auth token."""
- return IMPL.auth_destroy_token(context, token)
+ return IMPL.auth_token_destroy(context, token_id)
-def auth_get_token(context, token_hash):
+def auth_token_get(context, token_hash):
"""Retrieves a token given the hash representing it."""
- return IMPL.auth_get_token(context, token_hash)
+ return IMPL.auth_token_get(context, token_hash)
+
+
+def auth_token_update(context, token_hash, values):
+ """Updates a token given the hash representing it."""
+ return IMPL.auth_token_update(context, token_hash, values)
-def auth_create_token(context, token):
+def auth_token_create(context, token):
"""Creates a new token."""
- return IMPL.auth_create_token(context, token)
+ return IMPL.auth_token_create(context, token)
###################
@@ -721,6 +825,11 @@ def volume_get_all_by_host(context, host):
return IMPL.volume_get_all_by_host(context, host)
+def volume_get_all_by_instance(context, instance_id):
+ """Get all volumes belonging to a instance."""
+ return IMPL.volume_get_all_by_instance(context, instance_id)
+
+
def volume_get_all_by_project(context, project_id):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id)
@@ -1002,6 +1111,41 @@ def console_get(context, console_id, instance_id=None):
return IMPL.console_get(context, console_id, instance_id)
+ ##################
+
+
+def instance_type_create(context, values):
+ """Create a new instance type"""
+ return IMPL.instance_type_create(context, values)
+
+
+def instance_type_get_all(context, inactive=False):
+ """Get all instance types"""
+ return IMPL.instance_type_get_all(context, inactive)
+
+
+def instance_type_get_by_name(context, name):
+ """Get instance type by name"""
+ return IMPL.instance_type_get_by_name(context, name)
+
+
+def instance_type_get_by_flavor_id(context, id):
+ """Get instance type by name"""
+ return IMPL.instance_type_get_by_flavor_id(context, id)
+
+
+def instance_type_destroy(context, name):
+ """Delete a instance type"""
+ return IMPL.instance_type_destroy(context, name)
+
+
+def instance_type_purge(context, name):
+ """Purges (removes) an instance type from DB
+ Use instance_type_destroy for most cases
+ """
+ return IMPL.instance_type_purge(context, name)
+
+
####################
@@ -1028,3 +1172,21 @@ def zone_get(context, zone_id):
def zone_get_all(context):
"""Get all child Zones."""
return IMPL.zone_get_all(context)
+
+
+####################
+
+
+def instance_metadata_get(context, instance_id):
+ """Get all metadata for an instance"""
+ return IMPL.instance_metadata_get(context, instance_id)
+
+
+def instance_metadata_delete(context, instance_id, key):
+ """Delete the given metadata item"""
+ IMPL.instance_metadata_delete(context, instance_id, key)
+
+
+def instance_metadata_update_or_create(context, instance_id, metadata):
+ """Create or update instance metadata"""
+ IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
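
Every function added to nova/db/api.py above has the same shape: a thin,
documented wrapper that forwards to IMPL, the pluggable backend. A hedged usage
sketch of the new instance_type helpers, assuming a working nova installation
with the database configured; the 'm1.micro' flavor and its values are made up
for illustration:

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    db.instance_type_create(ctxt, dict(name='m1.micro', memory_mb=256,
                                       vcpus=1, local_gb=0, flavorid=6))
    flavors = db.instance_type_get_all(ctxt)       # dict keyed by name
    micro = db.instance_type_get_by_flavor_id(ctxt, 6)
    db.instance_type_destroy(ctxt, 'm1.micro')     # soft delete
    db.instance_type_purge(ctxt, 'm1.micro')       # hard delete, rarely needed
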
diff --git a/nova/db/base.py b/nova/db/base.py
index 1d1e80866..a0f2180c6 100644
--- a/nova/db/base.py
+++ b/nova/db/base.py
@@ -33,4 +33,4 @@ class Base(object):
def __init__(self, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
- self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
+ self.db = utils.import_object(db_driver) # pylint: disable=C0103
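
The base.py change only updates the pylint pragma, but the line it touches is
the heart of the pattern: Base composes whatever DB driver the db_driver flag
names. A rough, self-contained stand-in for utils.import_object, assuming the
driver string is either a module path or module.ClassName; nova's real helper
covers more cases:

    import importlib

    def import_object(import_str):
        """Return the named module, or an instance of the named class."""
        try:
            return importlib.import_module(import_str)
        except ImportError:
            mod_str, _sep, class_str = import_str.rpartition('.')
            return getattr(importlib.import_module(mod_str), class_str)()

    class Base(object):
        def __init__(self, db_driver='nova.db.api'):
            # Callers get a self.db attribute without importing the backend
            # directly, so tests can swap in a fake driver via the flag.
            self.db = import_object(db_driver)
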
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index d8751bef4..6da8dac10 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -34,6 +34,7 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import exists
from sqlalchemy.sql import func
+from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
@@ -118,6 +119,11 @@ def service_destroy(context, service_id):
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)
+ if service_ref.topic == 'compute' and \
+ len(service_ref.compute_node) != 0:
+ for c in service_ref.compute_node:
+ c.delete(session=session)
+
@require_admin_context
def service_get(context, service_id, session=None):
@@ -125,6 +131,7 @@ def service_get(context, service_id, session=None):
session = get_session()
result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
filter_by(id=service_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -136,12 +143,15 @@ def service_get(context, service_id, session=None):
@require_admin_context
-def service_get_all(context, disabled=False):
+def service_get_all(context, disabled=None):
session = get_session()
- return session.query(models.Service).\
- filter_by(deleted=can_read_deleted(context)).\
- filter_by(disabled=disabled).\
- all()
+ query = session.query(models.Service).\
+ filter_by(deleted=can_read_deleted(context))
+
+ if disabled is not None:
+ query = query.filter_by(disabled=disabled)
+
+ return query.all()
@require_admin_context
@@ -155,6 +165,17 @@ def service_get_all_by_topic(context, topic):
@require_admin_context
+def service_get_by_host_and_topic(context, host, topic):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(disabled=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ first()
+
+
+@require_admin_context
def service_get_all_by_host(context, host):
session = get_session()
return session.query(models.Service).\
@@ -164,6 +185,24 @@ def service_get_all_by_host(context, host):
@require_admin_context
+def service_get_all_compute_by_host(context, host):
+ topic = 'compute'
+ session = get_session()
+ result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ all()
+
+ if not result:
+ raise exception.NotFound(_("%s does not exist or is not "
+ "a compute node.") % host)
+
+ return result
+
+
+@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
@@ -274,6 +313,42 @@ def service_update(context, service_id, values):
@require_admin_context
+def compute_node_get(context, compute_id, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.ComputeNode).\
+ filter_by(id=compute_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+
+ if not result:
+ raise exception.NotFound(_('No computeNode for id %s') % compute_id)
+
+ return result
+
+
+@require_admin_context
+def compute_node_create(context, values):
+ compute_node_ref = models.ComputeNode()
+ compute_node_ref.update(values)
+ compute_node_ref.save()
+ return compute_node_ref
+
+
+@require_admin_context
+def compute_node_update(context, compute_id, values):
+ session = get_session()
+ with session.begin():
+ compute_ref = compute_node_get(context, compute_id, session=session)
+ compute_ref.update(values)
+ compute_ref.save(session=session)
+
+
+###################
+
+
+@require_admin_context
def certificate_get(context, certificate_id, session=None):
if not session:
session = get_session()
@@ -494,6 +569,16 @@ def floating_ip_get_by_address(context, address, session=None):
return result
+@require_context
+def floating_ip_update(context, address, values):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ for (key, value) in values.iteritems():
+ floating_ip_ref[key] = value
+ floating_ip_ref.save(session=session)
+
+
###################
@@ -566,18 +651,19 @@ def fixed_ip_disassociate(context, address):
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
session = get_session()
- # NOTE(vish): The nested select is because sqlite doesn't support
- # JOINs in UPDATEs.
- result = session.execute('UPDATE fixed_ips SET instance_id = NULL, '
- 'leased = 0 '
- 'WHERE network_id IN (SELECT id FROM networks '
- 'WHERE host = :host) '
- 'AND updated_at < :time '
- 'AND instance_id IS NOT NULL '
- 'AND allocated = 0',
- {'host': host,
- 'time': time})
- return result.rowcount
+ inner_q = session.query(models.Network.id).\
+ filter_by(host=host).\
+ subquery()
+ result = session.query(models.FixedIp).\
+ filter(models.FixedIp.network_id.in_(inner_q)).\
+ filter(models.FixedIp.updated_at < time).\
+ filter(models.FixedIp.instance_id != None).\
+ filter_by(allocated=0).\
+ update({'instance_id': None,
+ 'leased': 0,
+ 'updated_at': datetime.datetime.utcnow()},
+ synchronize_session='fetch')
+ return result
@require_admin_context
@@ -591,6 +677,22 @@ def fixed_ip_get_all(context, session=None):
return result
+@require_admin_context
+def fixed_ip_get_all_by_host(context, host=None):
+ session = get_session()
+
+ result = session.query(models.FixedIp).\
+ join(models.FixedIp.instance).\
+ filter_by(state=1).\
+ filter_by(host=host).\
+ all()
+
+ if not result:
+        raise exception.NotFound(_('No fixed ips defined for this host'))
+
+ return result
+
+
@require_context
def fixed_ip_get_by_address(context, address, session=None):
if not session:
@@ -665,6 +767,15 @@ def instance_create(context, values):
context - request context object
values - dict containing column values.
"""
+ metadata = values.get('metadata')
+ metadata_refs = []
+ if metadata:
+ for metadata_item in metadata:
+ metadata_ref = models.InstanceMetadata()
+ metadata_ref.update(metadata_item)
+ metadata_refs.append(metadata_ref)
+ values['metadata'] = metadata_refs
+
instance_ref = models.Instance()
instance_ref.update(values)
@@ -690,14 +801,21 @@ def instance_data_get_for_project(context, project_id):
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
- session.execute('update instances set deleted=1,'
- 'deleted_at=:at where id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at where instance_id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
+ session.query(models.Instance).\
+ filter_by(id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -894,6 +1012,45 @@ def instance_add_security_group(context, instance_id, security_group_id):
@require_context
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.vcpus))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.memory_mb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.local_gb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
@@ -939,9 +1096,11 @@ def key_pair_destroy_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update key_pairs set deleted=1 where user_id=:id',
- {'id': user_id})
+ session.query(models.KeyPair).\
+ filter_by(user_id=user_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1044,6 +1203,15 @@ def network_create_safe(context, values):
@require_admin_context
+def network_delete_safe(context, network_id):
+ session = get_session()
+ with session.begin():
+ network_ref = network_get(context, network_id=network_id, \
+ session=session)
+ session.delete(network_ref)
+
+
+@require_admin_context
def network_disassociate(context, network_id):
network_update(context, network_id, {'project_id': None,
'host': None})
@@ -1052,7 +1220,9 @@ def network_disassociate(context, network_id):
@require_admin_context
def network_disassociate_all(context):
session = get_session()
- session.execute('update networks set project_id=NULL')
+ session.query(models.Network).\
+ update({'project_id': None,
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1089,7 +1259,7 @@ def network_get_all(context):
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
@require_admin_context
@@ -1117,6 +1287,18 @@ def network_get_by_bridge(context, bridge):
@require_admin_context
+def network_get_by_cidr(context, cidr):
+ session = get_session()
+ result = session.query(models.Network).\
+ filter_by(cidr=cidr).first()
+
+ if not result:
+ raise exception.NotFound(_('Network with cidr %s does not exist') %
+ cidr)
+ return result
+
+
+@require_admin_context
def network_get_by_instance(_context, instance_id):
session = get_session()
rv = session.query(models.Network).\
@@ -1262,16 +1444,20 @@ def iscsi_target_create_safe(context, values):
@require_admin_context
-def auth_destroy_token(_context, token):
+def auth_token_destroy(context, token_id):
session = get_session()
- session.delete(token)
+ with session.begin():
+ token_ref = auth_token_get(context, token_id, session=session)
+ token_ref.delete(session=session)
@require_admin_context
-def auth_get_token(_context, token_hash):
- session = get_session()
+def auth_token_get(context, token_hash, session=None):
+ if session is None:
+ session = get_session()
tk = session.query(models.AuthToken).\
filter_by(token_hash=token_hash).\
+ filter_by(deleted=can_read_deleted(context)).\
first()
if not tk:
raise exception.NotFound(_('Token %s does not exist') % token_hash)
@@ -1279,7 +1465,16 @@ def auth_get_token(_context, token_hash):
@require_admin_context
-def auth_create_token(_context, token):
+def auth_token_update(context, token_hash, values):
+ session = get_session()
+ with session.begin():
+ token_ref = auth_token_get(context, token_hash, session=session)
+ token_ref.update(values)
+ token_ref.save(session=session)
+
+
+@require_admin_context
+def auth_token_create(_context, token):
tk = models.AuthToken()
tk.update(token)
tk.save()
@@ -1409,15 +1604,17 @@ def volume_data_get_for_project(context, project_id):
def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update volumes set deleted=1 where id=:id',
- {'id': volume_id})
- session.execute('update export_devices set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
- session.execute('update iscsi_targets set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
+ session.query(models.Volume).\
+ filter_by(id=volume_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.ExportDevice).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
+ session.query(models.IscsiTarget).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
@require_admin_context
@@ -1477,6 +1674,18 @@ def volume_get_all_by_host(context, host):
all()
+@require_admin_context
+def volume_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ return result
+
+
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -1637,17 +1846,21 @@ def security_group_create(context, values):
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1 where id=:id',
- {'id': security_group_id})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at '
- 'where security_group_id=:id',
- {'id': security_group_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_rules set deleted=1 '
- 'where group_id=:id',
- {'id': security_group_id})
+ session.query(models.SecurityGroup).\
+ filter_by(id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(security_group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ filter_by(group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1655,9 +1868,14 @@ def security_group_destroy_all(context, session=None):
if not session:
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1')
- session.execute('update security_group_rules set deleted=1')
+ session.query(models.SecurityGroup).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
###################
@@ -1786,12 +2004,15 @@ def user_create(_context, values):
def user_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_role_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where user_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
user_ref = user_get(context, id, session=session)
session.delete(user_ref)
@@ -1848,8 +2069,11 @@ def project_get_by_user(context, user_id):
session = get_session()
user = session.query(models.User).\
filter_by(deleted=can_read_deleted(context)).\
+ filter_by(id=user_id).\
options(joinedload_all('projects')).\
first()
+ if not user:
+ raise exception.NotFound(_('Invalid user_id %s') % user_id)
return user.projects
@@ -1882,10 +2106,12 @@ def project_update(context, project_id, values):
def project_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where project_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where project_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(project_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(project_id=id).\
+ delete()
project_ref = project_get(context, id, session=session)
session.delete(project_ref)
@@ -1910,11 +2136,11 @@ def user_get_roles_for_project(context, user_id, project_id):
def user_remove_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
- session.execute('delete from user_project_role_association where '
- 'user_id=:user_id and project_id=:project_id and '
- 'role=:role', {'user_id': user_id,
- 'project_id': project_id,
- 'role': role})
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(project_id=project_id).\
+ filter_by(role=role).\
+ delete()
def user_remove_role(context, user_id, role):
@@ -1959,6 +2185,51 @@ def host_get_networks(context, host):
all()
+###################
+
+
+@require_admin_context
+def migration_create(context, values):
+ migration = models.Migration()
+ migration.update(values)
+ migration.save()
+ return migration
+
+
+@require_admin_context
+def migration_update(context, id, values):
+ session = get_session()
+ with session.begin():
+ migration = migration_get(context, id, session=session)
+ migration.update(values)
+ migration.save(session=session)
+ return migration
+
+
+@require_admin_context
+def migration_get(context, id, session=None):
+ if not session:
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(id=id).first()
+ if not result:
+ raise exception.NotFound(_("No migration found with id %s")
+ % id)
+ return result
+
+
+@require_admin_context
+def migration_get_by_instance_and_status(context, instance_id, status):
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(instance_id=instance_id).\
+ filter_by(status=status).first()
+ if not result:
+ raise exception.NotFound(_("No migration found for instance "
+ "%(instance_id)s with status %(status)s") % locals())
+ return result
+
+
##################
@@ -2020,8 +2291,9 @@ def console_delete(context, console_id):
session = get_session()
with session.begin():
# consoles are meant to be transient. (mdragon)
- session.execute('delete from consoles '
- 'where id=:id', {'id': console_id})
+ session.query(models.Console).\
+ filter_by(id=console_id).\
+ delete()
def console_get_by_pool_instance(context, pool_id, instance_id):
@@ -2060,6 +2332,98 @@ def console_get(context, console_id, instance_id=None):
return result
+ ##################
+
+
+@require_admin_context
+def instance_type_create(_context, values):
+ try:
+ instance_type_ref = models.InstanceTypes()
+ instance_type_ref.update(values)
+ instance_type_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return instance_type_ref
+
+
+@require_context
+def instance_type_get_all(context, inactive=False):
+ """
+ Returns a dict describing all instance_types with name as key.
+ """
+ session = get_session()
+ if inactive:
+ inst_types = session.query(models.InstanceTypes).\
+ order_by("name").\
+ all()
+ else:
+ inst_types = session.query(models.InstanceTypes).\
+ filter_by(deleted=False).\
+ order_by("name").\
+ all()
+ if inst_types:
+ inst_dict = {}
+ for i in inst_types:
+ inst_dict[i['name']] = dict(i)
+ return inst_dict
+ else:
+ raise exception.NotFound
+
+
+@require_context
+def instance_type_get_by_name(context, name):
+ """Returns a dict describing specific instance_type"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(name=name).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No instance type with name %s") % name)
+ else:
+ return dict(inst_type)
+
+
+@require_context
+def instance_type_get_by_flavor_id(context, id):
+ """Returns a dict describing specific flavor_id"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(flavorid=int(id)).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No flavor with flavorid %s") % id)
+ else:
+ return dict(inst_type)
+
+
+@require_admin_context
+def instance_type_destroy(context, name):
+ """ Marks specific instance_type as deleted"""
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.update(dict(deleted=True))
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
+@require_admin_context
+def instance_type_purge(context, name):
+ """ Removes specific instance_type from DB
+ Usually instance_type_destroy should be used
+ """
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.delete()
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
####################
@@ -2073,6 +2437,7 @@ def zone_create(context, values):
@require_admin_context
def zone_update(context, zone_id, values):
+ session = get_session()
zone = session.query(models.Zone).filter_by(id=zone_id).first()
if not zone:
raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
@@ -2085,8 +2450,9 @@ def zone_update(context, zone_id, values):
def zone_delete(context, zone_id):
session = get_session()
with session.begin():
- session.execute('delete from zones '
- 'where id=:id', {'id': zone_id})
+ session.query(models.Zone).\
+ filter_by(id=zone_id).\
+ delete()
@require_admin_context
@@ -2102,3 +2468,65 @@ def zone_get(context, zone_id):
def zone_get_all(context):
session = get_session()
return session.query(models.Zone).all()
+
+
+####################
+
+@require_context
+def instance_metadata_get(context, instance_id):
+ session = get_session()
+
+ meta_results = session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+
+ meta_dict = {}
+ for i in meta_results:
+ meta_dict[i['key']] = i['value']
+ return meta_dict
+
+
+@require_context
+def instance_metadata_delete(context, instance_id, key):
+ session = get_session()
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def instance_metadata_get_item(context, instance_id, key):
+ session = get_session()
+
+ meta_result = session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not meta_result:
+ raise exception.NotFound(_('Invalid metadata key for instance %s') %
+ instance_id)
+ return meta_result
+
+
+@require_context
+def instance_metadata_update_or_create(context, instance_id, metadata):
+ session = get_session()
+ meta_ref = None
+ for key, value in metadata.iteritems():
+ try:
+            meta_ref = instance_metadata_get_item(context, instance_id, key)
+        except exception.NotFound:
+ meta_ref = models.InstanceMetadata()
+ meta_ref.update({"key": key, "value": value,
+ "instance_id": instance_id,
+ "deleted": 0})
+ meta_ref.save(session=session)
+ return metadata
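
Throughout this file the raw UPDATE/DELETE strings are replaced by query-level
operations, and the soft deletes all reuse one dict: deleted and deleted_at are
set, while updated_at is assigned to itself via literal_column so the bulk
statement leaves that timestamp exactly as it was (a reading of intent based on
the pattern; NovaBase's timestamp handling lives outside these hunks). A
self-contained sketch of the same idiom against an in-memory SQLite database,
with a made-up Volume model:

    import datetime

    from sqlalchemy import Boolean, Column, DateTime, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.sql.expression import literal_column

    Base = declarative_base()

    class Volume(Base):
        __tablename__ = 'volumes'
        id = Column(Integer, primary_key=True)
        deleted = Column(Boolean, default=False)
        deleted_at = Column(DateTime)
        updated_at = Column(DateTime)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Volume(id=1, updated_at=datetime.datetime(2011, 1, 1)))
    session.commit()

    # Bulk soft delete: flag the row, record when, and keep updated_at as-is
    # by assigning the column to its own current value.
    session.query(Volume).\
            filter_by(id=1).\
            update({'deleted': True,
                    'deleted_at': datetime.datetime.utcnow(),
                    'updated_at': literal_column('updated_at')},
                   synchronize_session=False)
    session.commit()
    assert session.query(Volume).get(1).updated_at.year == 2011
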
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
new file mode 100644
index 000000000..427934d53
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
@@ -0,0 +1,90 @@
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+fixed_ips = Table(
+ "fixed_ips",
+ meta,
+ Column(
+ "id",
+ Integer(),
+ primary_key=True,
+ nullable=False))
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+fixed_ips_addressV6 = Column(
+ "addressV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_netmaskV6 = Column(
+ "netmaskV6",
+ String(
+ length=3,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_gatewayV6 = Column(
+ "gatewayV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ fixed_ips.create_column(fixed_ips_addressV6)
+ fixed_ips.create_column(fixed_ips_netmaskV6)
+ fixed_ips.create_column(fixed_ips_gatewayV6)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
new file mode 100644
index 000000000..5e2cb69d9
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import api
+from nova import db
+from nova import log as logging
+
+import datetime
+
+meta = MetaData()
+
+
+#
+# New Tables
+#
+instance_types = Table('instance_types', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ unique=True),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('memory_mb', Integer(), nullable=False),
+ Column('vcpus', Integer(), nullable=False),
+ Column('local_gb', Integer(), nullable=False),
+ Column('flavorid', Integer(), nullable=False, unique=True),
+ Column('swap', Integer(), nullable=False, default=0),
+ Column('rxtx_quota', Integer(), nullable=False, default=0),
+ Column('rxtx_cap', Integer(), nullable=False, default=0))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here
+ # Don't create your own engine; bind migrate_engine
+ # to your metadata
+ meta.bind = migrate_engine
+ try:
+ instance_types.create()
+ except Exception:
+ logging.info(repr(instance_types))
+ logging.exception('Exception while creating instance_types table')
+ raise
+
+ # Here are the old static instance types
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+ try:
+ i = instance_types.insert()
+ for name, values in INSTANCE_TYPES.iteritems():
+ # FIXME(kpepple) should we be seeding created_at / updated_at ?
+            # now = datetime.datetime.utcnow()
+ i.execute({'name': name, 'memory_mb': values["memory_mb"],
+ 'vcpus': values["vcpus"], 'deleted': False,
+ 'local_gb': values["local_gb"],
+ 'flavorid': values["flavorid"]})
+ except Exception:
+ logging.info(repr(instance_types))
+ logging.exception('Exception while seeding instance_types table')
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+    for table in (instance_types, ):
+ table.drop()
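
Once this migration has run, the instance_type_get_all() helper added to
nova/db/sqlalchemy/api.py returns the seeded rows as a dict keyed by flavor
name, roughly as below (ids, deleted flags and timestamps omitted for brevity):

    # Approximate return value of db.instance_type_get_all(ctxt) after
    # migration 008 has seeded the table.
    flavors = {
        'm1.tiny': {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0, 'flavorid': 1},
        'm1.small': {'memory_mb': 2048, 'vcpus': 1, 'local_gb': 20, 'flavorid': 2},
        'm1.medium': {'memory_mb': 4096, 'vcpus': 2, 'local_gb': 40, 'flavorid': 3},
        'm1.large': {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 80, 'flavorid': 4},
        'm1.xlarge': {'memory_mb': 16384, 'vcpus': 8, 'local_gb': 160, 'flavorid': 5}}
    assert flavors['m1.small']['flavorid'] == 2
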
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
new file mode 100644
index 000000000..4fda525f1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+migrations = Table('migrations', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('source_compute', String(255)),
+ Column('dest_compute', String(255)),
+ Column('dest_host', String(255)),
+ Column('instance_id', Integer, ForeignKey('instances.id'),
+ nullable=True),
+ Column('status', String(255)),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (migrations, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
new file mode 100644
index 000000000..eb3066894
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from sqlalchemy.sql import text
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+instances_os_type = Column('os_type',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ instances.create_column(instances_os_type)
+ migrate_engine.execute(instances.update()\
+ .where(instances.c.os_type == None)\
+ .values(os_type='linux'))
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances.drop_column('os_type')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
new file mode 100644
index 000000000..23ccccb4e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate import *
+from nova import log as logging
+from sqlalchemy import *
+
+
+meta = MetaData()
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+compute_nodes = Table('compute_nodes', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('service_id', Integer(), nullable=False),
+
+ Column('vcpus', Integer(), nullable=False),
+ Column('memory_mb', Integer(), nullable=False),
+ Column('local_gb', Integer(), nullable=False),
+ Column('vcpus_used', Integer(), nullable=False),
+ Column('memory_mb_used', Integer(), nullable=False),
+ Column('local_gb_used', Integer(), nullable=False),
+ Column('hypervisor_type',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
+ Column('hypervisor_version', Integer(), nullable=False),
+ Column('cpu_info',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
+ )
+
+
+#
+# Tables to alter
+#
+instances_launched_on = Column(
+ 'launched_on',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ try:
+ compute_nodes.create()
+ except Exception:
+ logging.info(repr(compute_nodes))
+ logging.exception('Exception while creating table')
+ meta.drop_all(tables=[compute_nodes])
+ raise
+
+ instances.create_column(instances_launched_on)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
new file mode 100644
index 000000000..e87085668
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2011 NTT.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# Tables to alter
+#
+networks = Table('networks', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('injected', Boolean(create_constraint=True, name=None)),
+ Column('cidr',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('netmask',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('bridge',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('gateway',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('broadcast',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('dns',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vlan', Integer()),
+ Column('vpn_public_address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vpn_public_port', Integer()),
+ Column('vpn_private_address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('dhcp_start',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('cidr_v6',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('ra_server', String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column(
+ 'label',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+fixed_ips = Table('fixed_ips', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('network_id',
+ Integer(),
+ ForeignKey('networks.id'),
+ nullable=True),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=True),
+ Column('allocated', Boolean(create_constraint=True, name=None)),
+ Column('leased', Boolean(create_constraint=True, name=None)),
+ Column('reserved', Boolean(create_constraint=True, name=None)),
+ Column("addressV6", String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column("netmaskV6", String(length=3,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column("gatewayV6", String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ )
+#
+# New Tables
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+networks_netmask_v6 = Column(
+ 'netmask_v6',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Alter column name
+ networks.c.ra_server.alter(name='gateway_v6')
+ # Add new column to existing table
+ networks.create_column(networks_netmask_v6)
+
+ # drop existing columns from table
+ fixed_ips.c.addressV6.drop()
+ fixed_ips.c.netmaskV6.drop()
+ fixed_ips.c.gatewayV6.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
new file mode 100644
index 000000000..3fb92e85c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
@@ -0,0 +1,50 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+migrations = Table('migrations', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# Tables to alter
+#
+#
+
+old_flavor_id = Column('old_flavor_id', Integer())
+new_flavor_id = Column('new_flavor_id', Integer())
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ migrations.create_column(old_flavor_id)
+ migrations.create_column(new_flavor_id)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ migrations.drop_column(old_flavor_id)
+ migrations.drop_column(new_flavor_id)
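These version modules are not invoked directly; nova/db/sqlalchemy/migration.py (touched just below) drives them through sqlalchemy-migrate's versioning API. A rough sketch of that flow, with a hypothetical SQLite URL and repository path, and assuming the database is already under version control:

from migrate.versioning import api as versioning_api

sql_connection = 'sqlite:///nova.sqlite'        # hypothetical FLAGS.sql_connection
repo_path = 'nova/db/sqlalchemy/migrate_repo'   # directory holding these scripts

current = versioning_api.db_version(sql_connection, repo_path)
versioning_api.upgrade(sql_connection, repo_path)   # runs 012, 013, ... in order
print('upgraded from %s to %s' %
      (current, versioning_api.db_version(sql_connection, repo_path)))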
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 9bdaa6d6b..d9e303599 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -60,7 +60,7 @@ def db_version():
'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association',
'security_group_rules', 'security_groups',
- 'services',
+ 'services', 'migrations',
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 1882efeba..3b95ac23e 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -113,6 +113,41 @@ class Service(BASE, NovaBase):
availability_zone = Column(String(255), default='nova')
+class ComputeNode(BASE, NovaBase):
+ """Represents a running compute service on a host."""
+
+ __tablename__ = 'compute_nodes'
+ id = Column(Integer, primary_key=True)
+ service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
+ service = relationship(Service,
+ backref=backref('compute_node'),
+ foreign_keys=service_id,
+ primaryjoin='and_('
+ 'ComputeNode.service_id == Service.id,'
+ 'ComputeNode.deleted == False)')
+
+ vcpus = Column(Integer, nullable=True)
+ memory_mb = Column(Integer, nullable=True)
+ local_gb = Column(Integer, nullable=True)
+ vcpus_used = Column(Integer, nullable=True)
+ memory_mb_used = Column(Integer, nullable=True)
+ local_gb_used = Column(Integer, nullable=True)
+ hypervisor_type = Column(Text, nullable=True)
+ hypervisor_version = Column(Integer, nullable=True)
+
+ # Note(masumotok): Expected Strings example:
+ #
+ # '{"arch":"x86_64",
+ # "model":"Nehalem",
+ # "topology":{"sockets":1, "threads":2, "cores":3},
+ # "features":["tdtscp", "xtpr"]}'
+ #
+ # The value is a JSON-serializable string and must contain all of the
+ # dictionary keys above, since it is copied from the <cpu> tag of
+ # getCapabilities() (see libvirt.virtConnection).
+ cpu_info = Column(Text, nullable=True)
+
+
class Certificate(BASE, NovaBase):
"""Represents a an x509 certificate"""
__tablename__ = 'certificates'
@@ -126,11 +161,16 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
+ injected_files = []
+
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
- return FLAGS.instance_name_template % self.id
+ base_name = FLAGS.instance_name_template % self.id
+ if getattr(self, '_rescue', False):
+ base_name += "-rescue"
+ return base_name
admin_pass = Column(String(255))
user_id = Column(String(255))
@@ -186,8 +226,13 @@ class Instance(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+ # To remember on which host an instance booted.
+ # An instance may have moved to another host by live migration.
+ launched_on = Column(Text)
locked = Column(Boolean)
+ os_type = Column(String(255))
+
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
# vmstate_state = running, halted, suspended, paused
@@ -210,6 +255,20 @@ class InstanceActions(BASE, NovaBase):
error = Column(Text)
+class InstanceTypes(BASE, NovaBase):
+ """Represents the possible instance types (flavors) of VMs offered."""
+ __tablename__ = "instance_types"
+ id = Column(Integer, primary_key=True)
+ name = Column(String(255), unique=True)
+ memory_mb = Column(Integer)
+ vcpus = Column(Integer)
+ local_gb = Column(Integer)
+ flavorid = Column(Integer, unique=True)
+ swap = Column(Integer, nullable=False, default=0)
+ rxtx_quota = Column(Integer, nullable=False, default=0)
+ rxtx_cap = Column(Integer, nullable=False, default=0)
+
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
@@ -370,6 +429,20 @@ class KeyPair(BASE, NovaBase):
public_key = Column(Text)
+class Migration(BASE, NovaBase):
+ """Represents a running host-to-host migration."""
+ __tablename__ = 'migrations'
+ id = Column(Integer, primary_key=True, nullable=False)
+ source_compute = Column(String(255))
+ dest_compute = Column(String(255))
+ dest_host = Column(String(255))
+ old_flavor_id = Column(Integer())
+ new_flavor_id = Column(Integer())
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
+ #TODO(_cerberus_): enum
+ status = Column(String(255))
+
+
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
@@ -383,8 +456,8 @@ class Network(BASE, NovaBase):
cidr = Column(String(255), unique=True)
cidr_v6 = Column(String(255), unique=True)
- ra_server = Column(String(255))
-
+ gateway_v6 = Column(String(255))
+ netmask_v6 = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
gateway = Column(String(255))
@@ -571,12 +644,12 @@ def register_models():
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
- models = (Service, Instance, InstanceActions,
+ models = (Service, Instance, InstanceActions, InstanceTypes,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- InstanceMetadata)
+ InstanceMetadata, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
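The new ComputeNode, InstanceTypes and Migration classes follow the same declarative mapping pattern as the existing models. A standalone sketch of that pattern with a toy table and an in-memory engine (not Nova's BASE/NovaBase):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class InstanceTypeDemo(Base):
    """Toy stand-in for the InstanceTypes model above."""
    __tablename__ = 'instance_types_demo'
    id = Column(Integer, primary_key=True)
    name = Column(String(255), unique=True)
    memory_mb = Column(Integer)
    vcpus = Column(Integer)


engine = create_engine('sqlite://')     # in-memory database
Base.metadata.create_all(engine)        # same create_all() call register_models() makes

session = sessionmaker(bind=engine)()
session.add(InstanceTypeDemo(name='m1.tiny', memory_mb=512, vcpus=1))
session.commit()
print(session.query(InstanceTypeDemo).filter_by(name='m1.tiny').one().memory_mb)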
diff --git a/nova/exception.py b/nova/exception.py
index 7d65bd6a5..4e2bbdbaf 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -46,7 +46,7 @@ class Error(Exception):
class ApiError(Error):
- def __init__(self, message='Unknown', code='Unknown'):
+ def __init__(self, message='Unknown', code='ApiError'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
@@ -88,6 +88,10 @@ class InvalidInputException(Error):
pass
+class InvalidContentType(Error):
+ pass
+
+
class TimeoutException(Error):
pass
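With the new default, an ApiError raised without an explicit code now stringifies as 'ApiError: <message>' rather than the older 'Unknown: <message>'. A minimal standalone illustration of the formatting:

class ApiError(Exception):
    """Stand-in mirroring the constructor above."""
    def __init__(self, message='Unknown', code='ApiError'):
        self.message = message
        self.code = code
        super(ApiError, self).__init__('%s: %s' % (code, message))


print(str(ApiError('instance not found')))        # ApiError: instance not found
print(str(ApiError('bad request', code='EC2')))   # EC2: bad request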
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index dd82a9366..a7dee8caf 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -48,7 +48,6 @@ class Exchange(object):
nm = self.name
LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
' %(message)s') % locals())
- routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
LOG.debug(_('Publishing to route %s'), f)
diff --git a/nova/flags.py b/nova/flags.py
index f885de293..77fd0c2c0 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -298,10 +298,14 @@ DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server')
DEFINE_integer('ec2_port', 8773, 'cloud controller port')
DEFINE_string('ec2_scheme', 'http', 'prefix for ec2')
DEFINE_string('ec2_path', '/services/Cloud', 'suffix for ec2')
+DEFINE_string('osapi_extensions_path', '/var/lib/nova/extensions',
+ 'default directory for nova extensions')
DEFINE_string('osapi_host', '$my_ip', 'ip of api server')
DEFINE_string('osapi_scheme', 'http', 'prefix for openstack')
DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack')
+DEFINE_integer('osapi_max_limit', 1000,
+ 'max number of items returned in a collection response')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
@@ -321,6 +325,8 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
"Top-level directory for maintaining nova's state")
+DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
+ "Directory for lock files")
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
@@ -346,7 +352,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
+DEFINE_string('image_service', 'nova.image.local.LocalImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
@@ -354,5 +360,11 @@ DEFINE_string('host', socket.gethostname(),
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
+
DEFINE_list('memcached_servers', None,
- 'Memcached servers or None for in process cache.')
+ 'Memcached servers or None for in process cache.')
+
+DEFINE_string('zone_name', 'nova', 'name of this zone')
+DEFINE_list('zone_capabilities',
+ ['hypervisor=xenserver;kvm', 'os=linux;windows'],
+ 'Key/Multi-value list representing capabilities of this zone')
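Each zone_capabilities entry packs multiple values behind a single key ('hypervisor=xenserver;kvm'). A sketch of how such a list could be unpacked into a dict of sets (the real consumer lives in the scheduler code, outside this hunk):

def parse_capabilities(pairs):
    """Split ['hypervisor=xenserver;kvm', ...] into {'hypervisor': {...}, ...}."""
    caps = {}
    for pair in pairs:
        key, _sep, values = pair.partition('=')
        caps[key] = set(values.split(';')) if values else set()
    return caps


print(parse_capabilities(['hypervisor=xenserver;kvm', 'os=linux;windows']))
# e.g. {'hypervisor': set(['xenserver', 'kvm']), 'os': set(['linux', 'windows'])}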
diff --git a/nova/image/fake.py b/nova/image/fake.py
new file mode 100644
index 000000000..08302d6eb
--- /dev/null
+++ b/nova/image/fake.py
@@ -0,0 +1,113 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Implementation of a fake image service."""
+
+import copy
+import datetime
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.image import service
+
+
+LOG = logging.getLogger('nova.image.fake')
+
+
+FLAGS = flags.FLAGS
+
+
+class FakeImageService(service.BaseImageService):
+ """Mock (fake) image service for unit testing."""
+
+ def __init__(self):
+ self.images = {}
+ # NOTE(justinsb): The OpenStack API can't upload an image?
+ # So, make sure we've got one.
+ timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
+ image = {'id': '123456',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'status': 'active',
+ 'type': 'machine',
+ 'properties': {'kernel_id': FLAGS.null_kernel,
+ 'ramdisk_id': FLAGS.null_kernel,
+ 'disk_format': 'ami'}
+ }
+ self.create(None, image)
+ super(FakeImageService, self).__init__()
+
+ def index(self, context):
+ """Returns list of images."""
+ return copy.deepcopy(self.images.values())
+
+ def detail(self, context):
+ """Return list of detailed image information."""
+ return copy.deepcopy(self.images.values())
+
+ def show(self, context, image_id):
+ """Get data about specified image.
+
+ Returns a dict containing image data for the given opaque image id.
+
+ """
+ image_id = int(image_id)
+ image = self.images.get(image_id)
+ if image:
+ return copy.deepcopy(image)
+ LOG.warn("Unable to find image id %s. Have images: %s",
+ image_id, self.images)
+ raise exception.NotFound
+
+ def create(self, context, data):
+ """Store the image data and return the new image id.
+
+ :raises Duplicate if the image already exists.
+
+ """
+ image_id = int(data['id'])
+ if self.images.get(image_id):
+ raise exception.Duplicate()
+
+ self.images[image_id] = copy.deepcopy(data)
+
+ def update(self, context, image_id, data):
+ """Replace the contents of the given image with the new data.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ image_id = int(image_id)
+ if not self.images.get(image_id):
+ raise exception.NotFound
+ self.images[image_id] = copy.deepcopy(data)
+
+ def delete(self, context, image_id):
+ """Delete the given image.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ image_id = int(image_id)
+ removed = self.images.pop(image_id, None)
+ if not removed:
+ raise exception.NotFound
+
+ def delete_all(self):
+ """Clears out all images."""
+ self.images.clear()
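FakeImageService returns copy.deepcopy of its records, so tests can mutate what they get back without corrupting the in-memory store. A standalone sketch of why the copy matters:

import copy

store = {123456: {'id': 123456, 'name': 'fakeimage123456',
                  'properties': {'kernel_id': 'aki-1'}}}


def show(image_id):
    # Same defensive pattern as FakeImageService.show() above.
    return copy.deepcopy(store[image_id])


image = show(123456)
image['properties']['kernel_id'] = 'aki-override'   # caller mutates its own copy
print(store[123456]['properties']['kernel_id'])     # still 'aki-1'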
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 593c4bce6..fdf468594 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -17,9 +17,10 @@
"""Implementation of an image service that uses Glance as the backend"""
from __future__ import absolute_import
-import httplib
-import json
-import urlparse
+
+import datetime
+
+from glance.common import exception as glance_exception
from nova import exception
from nova import flags
@@ -38,58 +39,204 @@ GlanceClient = utils.import_class('glance.client.Client')
class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
- def __init__(self):
- self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
+ GLANCE_ONLY_ATTRS = ["size", "location", "disk_format",
+ "container_format"]
+
+ # NOTE(sirp): Overriding to use _translate_to_service provided by
+ # BaseImageService
+ SERVICE_IMAGE_ATTRS = service.BaseImageService.BASE_IMAGE_ATTRS +\
+ GLANCE_ONLY_ATTRS
+
+ def __init__(self, client=None):
+ # FIXME(sirp): can we avoid dependency-injection here by
+ # stubbing out a fake?
+ if client is None:
+ self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
+ else:
+ self.client = client
def index(self, context):
"""
Calls out to Glance for a list of images available
"""
- return self.client.get_images()
+ # NOTE(sirp): We need to use `get_images_detailed` and not
+ # `get_images` here because we need `is_public` and `properties`
+ # included so we can filter by user
+ filtered = []
+ image_metas = self.client.get_images_detailed()
+ for image_meta in image_metas:
+ if self._is_image_available(context, image_meta):
+ meta_subset = utils.subset_dict(image_meta, ('id', 'name'))
+ filtered.append(meta_subset)
+ return filtered
def detail(self, context):
"""
Calls out to Glance for a list of detailed image information
"""
- return self.client.get_images_detailed()
-
- def show(self, context, id):
+ filtered = []
+ image_metas = self.client.get_images_detailed()
+ for image_meta in image_metas:
+ if self._is_image_available(context, image_meta):
+ base_image_meta = self._translate_to_base(image_meta)
+ filtered.append(base_image_meta)
+ return filtered
+
+ def show(self, context, image_id):
"""
Returns a dict containing image data for the given opaque image id.
"""
- image = self.client.get_image_meta(id)
- if image:
- return image
+ try:
+ image_meta = self.client.get_image_meta(image_id)
+ except glance_exception.NotFound:
+ raise exception.NotFound
+
+ if not self._is_image_available(context, image_meta):
+ raise exception.NotFound
+
+ base_image_meta = self._translate_to_base(image_meta)
+ return base_image_meta
+
+ def show_by_name(self, context, name):
+ """
+ Returns a dict containing image data for the given name.
+ """
+ # TODO(vish): replace this with a more efficient call when glance
+ # supports it.
+ image_metas = self.detail(context)
+ for image_meta in image_metas:
+ if name == image_meta.get('name'):
+ return image_meta
raise exception.NotFound
- def create(self, context, data):
+ def get(self, context, image_id, data):
"""
- Store the image data and return the new image id.
+ Calls out to Glance for metadata and data and writes data.
+ """
+ try:
+ image_meta, image_chunks = self.client.get_image(image_id)
+ except glance_exception.NotFound:
+ raise exception.NotFound
- :raises AlreadyExists if the image already exist.
+ for chunk in image_chunks:
+ data.write(chunk)
+ base_image_meta = self._translate_to_base(image_meta)
+ return base_image_meta
+
+ def create(self, context, image_meta, data=None):
"""
- return self.client.add_image(image_meta=data)
+ Store the image data and return the new image id.
- def update(self, context, image_id, data):
+ :raises AlreadyExists if the image already exists.
+ """
+ # Translate Base -> Service
+ LOG.debug(_("Creating image in Glance. Metadata passed in %s"),
+ image_meta)
+ sent_service_image_meta = self._translate_to_service(image_meta)
+ LOG.debug(_("Metadata after formatting for Glance %s"),
+ sent_service_image_meta)
+
+ recv_service_image_meta = self.client.add_image(
+ sent_service_image_meta, data)
+
+ # Translate Service -> Base
+ base_image_meta = self._translate_to_base(recv_service_image_meta)
+ LOG.debug(_("Metadata returned from Glance formatted for Base %s"),
+ base_image_meta)
+ return base_image_meta
+
+ def update(self, context, image_id, image_meta, data=None):
"""Replace the contents of the given image with the new data.
:raises NotFound if the image does not exist.
-
"""
- return self.client.update_image(image_id, data)
+ try:
+ image_meta = self.client.update_image(image_id, image_meta, data)
+ except glance_exception.NotFound:
+ raise exception.NotFound
+
+ base_image_meta = self._translate_to_base(image_meta)
+ return base_image_meta
def delete(self, context, image_id):
"""
Delete the given image.
:raises NotFound if the image does not exist.
-
"""
- return self.client.delete_image(image_id)
+ try:
+ result = self.client.delete_image(image_id)
+ except glance_exception.NotFound:
+ raise exception.NotFound
+ return result
def delete_all(self):
"""
Clears out all images
"""
pass
+
+ @classmethod
+ def _translate_to_base(cls, image_meta):
+ """Overriding the base translation to handle conversion to datetime
+ objects
+ """
+ image_meta = service.BaseImageService._translate_to_base(image_meta)
+ image_meta = _convert_timestamps_to_datetimes(image_meta)
+ return image_meta
+
+ @staticmethod
+ def _is_image_available(context, image_meta):
+ """
+ Images are always available if they are public or if the user is an
+ admin.
+
+ Otherwise, we filter by project_id (if present) and then fall back to
+ images owned by the user.
+ """
+ # FIXME(sirp): We should be filtering by user_id on the Glance side
+ # for security; however, we can't do that until we get authn/authz
+ # sorted out. Until then, filtering in Nova.
+ if image_meta['is_public'] or context.is_admin:
+ return True
+
+ properties = image_meta['properties']
+
+ if context.project_id and ('project_id' in properties):
+ return str(properties['project_id']) == str(context.project_id)
+
+ try:
+ user_id = properties['user_id']
+ except KeyError:
+ return False
+
+ return str(user_id) == str(context.user_id)
+
+
+# utility functions
+def _convert_timestamps_to_datetimes(image_meta):
+ """
+ Returns image with known timestamp fields converted to datetime objects
+ """
+ for attr in ['created_at', 'updated_at', 'deleted_at']:
+ if image_meta.get(attr):
+ image_meta[attr] = _parse_glance_iso8601_timestamp(
+ image_meta[attr])
+ return image_meta
+
+
+def _parse_glance_iso8601_timestamp(timestamp):
+ """
+ Parse a subset of iso8601 timestamps into datetime objects
+ """
+ iso_formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"]
+
+ for iso_format in iso_formats:
+ try:
+ return datetime.datetime.strptime(timestamp, iso_format)
+ except ValueError:
+ pass
+
+ raise ValueError(_("%(timestamp)s does not follow any of the "
+ "signatures: %(iso_formats)s") % locals())
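The timestamp helper accepts two ISO 8601 shapes (with or without microseconds), which is why it tries each format in turn. A standalone round-trip using the same approach:

import datetime

ISO_FORMATS = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"]


def parse(timestamp):
    # Same try-each-format approach as _parse_glance_iso8601_timestamp().
    for iso_format in ISO_FORMATS:
        try:
            return datetime.datetime.strptime(timestamp, iso_format)
        except ValueError:
            pass
    raise ValueError('unparseable timestamp: %r' % timestamp)


print(parse('2011-04-05T12:55:57.000123'))   # with microseconds
print(parse('2011-04-05T12:55:57'))          # without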
diff --git a/nova/image/local.py b/nova/image/local.py
index f78b9aa89..1fb6e1f13 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -15,57 +15,128 @@
# License for the specific language governing permissions and limitations
# under the License.
-import cPickle as pickle
+import json
import os.path
import random
-import tempfile
+import shutil
from nova import exception
+from nova import flags
+from nova import log as logging
from nova.image import service
+from nova import utils
-class LocalImageService(service.BaseImageService):
+FLAGS = flags.FLAGS
+flags.DEFINE_string('images_path', '$state_path/images',
+ 'path to decrypted images')
+
+LOG = logging.getLogger('nova.image.local')
+
+class LocalImageService(service.BaseImageService):
"""Image service storing images to local disk.
+
It assumes that image_ids are integers.
"""
def __init__(self):
- self._path = tempfile.mkdtemp()
+ self._path = FLAGS.images_path
- def _path_to(self, image_id):
- return os.path.join(self._path, str(image_id))
+ def _path_to(self, image_id, fname='info.json'):
+ if fname:
+ return os.path.join(self._path, '%08x' % int(image_id), fname)
+ return os.path.join(self._path, '%08x' % int(image_id))
def _ids(self):
"""The list of all image ids."""
- return [int(i) for i in os.listdir(self._path)]
+ images = []
+ for image_dir in os.listdir(self._path):
+ try:
+ unhexed_image_id = int(image_dir, 16)
+ except ValueError:
+ LOG.error(_("%s is not in the correct directory "
+ "naming format") % image_dir)
+ else:
+ images.append(unhexed_image_id)
+ return images
def index(self, context):
- return [dict(id=i['id'], name=i['name']) for i in self.detail(context)]
+ filtered = []
+ image_metas = self.detail(context)
+ for image_meta in image_metas:
+ meta = utils.subset_dict(image_meta, ('id', 'name'))
+ filtered.append(meta)
+ return filtered
def detail(self, context):
- return [self.show(context, id) for id in self._ids()]
+ images = []
+ for image_id in self._ids():
+ try:
+ image = self.show(context, image_id)
+ images.append(image)
+ except exception.NotFound:
+ continue
+ return images
+
+ def show(self, context, image_id):
+ try:
+ with open(self._path_to(image_id)) as metadata_file:
+ return json.load(metadata_file)
+ except (IOError, ValueError):
+ raise exception.NotFound
+
+ def show_by_name(self, context, name):
+ """Returns a dict containing image data for the given name."""
+ # NOTE(vish): Not very efficient, but the local image service
+ # is for testing so it should be fine.
+ images = self.detail(context)
+ image = None
+ for candidate in images:
+ if name == candidate.get('name'):
+ image = candidate
+ break
+ if image is None:
+ raise exception.NotFound
+ return image
- def show(self, context, id):
+ def get(self, context, image_id, data):
+ """Get image and metadata."""
try:
- return pickle.load(open(self._path_to(id)))
- except IOError:
+ with open(self._path_to(image_id)) as metadata_file:
+ metadata = json.load(metadata_file)
+ with open(self._path_to(image_id, 'image')) as image_file:
+ shutil.copyfileobj(image_file, data)
+ except (IOError, ValueError):
raise exception.NotFound
+ return metadata
- def create(self, context, data):
- """Store the image data and return the new image id."""
- id = random.randint(0, 2 ** 31 - 1)
- data['id'] = id
- self.update(context, id, data)
- return id
+ def create(self, context, metadata, data=None):
+ """Store the image data and return the new image."""
+ image_id = random.randint(0, 2 ** 31 - 1)
+ image_path = self._path_to(image_id, None)
+ if not os.path.exists(image_path):
+ os.mkdir(image_path)
+ return self.update(context, image_id, metadata, data)
- def update(self, context, image_id, data):
+ def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data."""
+ metadata['id'] = image_id
try:
- pickle.dump(data, open(self._path_to(image_id), 'w'))
- except IOError:
+ if data:
+ location = self._path_to(image_id, 'image')
+ with open(location, 'w') as image_file:
+ shutil.copyfileobj(data, image_file)
+ # NOTE(vish): update metadata similarly to glance
+ metadata['status'] = 'active'
+ metadata['location'] = location
+ with open(self._path_to(image_id), 'w') as metadata_file:
+ json.dump(metadata, metadata_file)
+ except (IOError, ValueError):
raise exception.NotFound
+ return metadata
def delete(self, context, image_id):
"""Delete the given image.
@@ -73,18 +144,11 @@ class LocalImageService(service.BaseImageService):
"""
try:
- os.unlink(self._path_to(image_id))
- except IOError:
+ shutil.rmtree(self._path_to(image_id, None))
+ except (IOError, ValueError):
raise exception.NotFound
def delete_all(self):
"""Clears out all images in local directory."""
- for id in self._ids():
- os.unlink(self._path_to(id))
-
- def delete_imagedir(self):
- """Deletes the local directory.
- Raises OSError if directory is not empty.
-
- """
- os.rmdir(self._path)
+ for image_id in self._ids():
+ shutil.rmtree(self._path_to(image_id, None))
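LocalImageService now keys images by zero-padded hex directory names instead of pickled files. A small sketch of the path scheme and the round-trip back to integer ids (images_path is a hypothetical FLAGS.images_path value):

import os

images_path = '/var/lib/nova/images'


def path_to(image_id, fname='info.json'):
    # Mirrors LocalImageService._path_to(): one 8-digit hex directory per image.
    if fname:
        return os.path.join(images_path, '%08x' % int(image_id), fname)
    return os.path.join(images_path, '%08x' % int(image_id))


print(path_to(123456))            # /var/lib/nova/images/0001e240/info.json
print(path_to(123456, 'image'))   # /var/lib/nova/images/0001e240/image
print(int('0001e240', 16))        # 123456 -- how _ids() recovers the id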
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 14135a1ee..ddec5f3aa 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -21,94 +21,94 @@ Proxy AMI-related calls from the cloud controller, to the running
objectstore service.
"""
-import json
-import urllib
+import binascii
+import eventlet
+import os
+import shutil
+import tarfile
+import tempfile
+from xml.etree import ElementTree
import boto.s3.connection
+from nova import crypto
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
from nova.image import service
+from nova.api.ec2 import ec2utils
FLAGS = flags.FLAGS
+flags.DEFINE_string('image_decryption_dir', '/tmp',
+ 'parent dir for tempdir used for image decryption')
-def map_s3_to_base(image):
- """Convert from S3 format to format defined by BaseImageService."""
- i = {}
- i['id'] = image.get('imageId')
- i['name'] = image.get('imageId')
- i['kernel_id'] = image.get('kernelId')
- i['ramdisk_id'] = image.get('ramdiskId')
- i['location'] = image.get('imageLocation')
- i['owner_id'] = image.get('imageOwnerId')
- i['status'] = image.get('imageState')
- i['type'] = image.get('type')
- i['is_public'] = image.get('isPublic')
- i['architecture'] = image.get('architecture')
- return i
+class S3ImageService(service.BaseImageService):
+ def __init__(self, service=None, *args, **kwargs):
+ if service is None:
+ service = utils.import_object(FLAGS.image_service)
+ self.service = service
+ self.service.__init__(*args, **kwargs)
+ def create(self, context, metadata, data=None):
+ """metadata['properties'] should contain image_location"""
+ image = self._s3_create(context, metadata)
+ return image
-class S3ImageService(service.BaseImageService):
+ def delete(self, context, image_id):
+ # FIXME(vish): call to show is to check filter
+ self.show(context, image_id)
+ self.service.delete(context, image_id)
- def modify(self, context, image_id, operation):
- self._conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=self._qs({'image_id': image_id,
- 'operation': operation}))
- return True
-
- def update(self, context, image_id, attributes):
- """update an image's attributes / info.json"""
- attributes.update({"image_id": image_id})
- self._conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=self._qs(attributes))
- return True
-
- def register(self, context, image_location):
- """ rpc call to register a new image based from a manifest """
- image_id = utils.generate_uid('ami')
- self._conn(context).make_request(
- method='PUT',
- bucket='_images',
- query_args=self._qs({'image_location': image_location,
- 'image_id': image_id}))
- return image_id
+ def update(self, context, image_id, metadata, data=None):
+ # FIXME(vish): call to show is to check filter
+ self.show(context, image_id)
+ image = self.service.update(context, image_id, metadata, data)
+ return image
def index(self, context):
- """Return a list of all images that a user can see."""
- response = self._conn(context).make_request(
- method='GET',
- bucket='_images')
- images = json.loads(response.read())
- return [map_s3_to_base(i) for i in images]
+ images = self.service.index(context)
+ # FIXME(vish): index doesn't filter so we do it manually
+ return self._filter(context, images)
+
+ def detail(self, context):
+ images = self.service.detail(context)
+ # FIXME(vish): detail doesn't filter so we do it manually
+ return self._filter(context, images)
+
+ @classmethod
+ def _is_visible(cls, context, image):
+ return (context.is_admin
+ or context.project_id == image['properties']['owner_id']
+ or image['properties']['is_public'] == 'True')
+
+ @classmethod
+ def _filter(cls, context, images):
+ filtered = []
+ for image in images:
+ if not cls._is_visible(context, image):
+ continue
+ filtered.append(image)
+ return filtered
def show(self, context, image_id):
- """return a image object if the context has permissions"""
- if FLAGS.connection_type == 'fake':
- return {'imageId': 'bar'}
- result = self.index(context)
- result = [i for i in result if i['id'] == image_id]
- if not result:
- raise exception.NotFound(_('Image %s could not be found')
- % image_id)
- image = result[0]
+ image = self.service.show(context, image_id)
+ if not self._is_visible(context, image):
+ raise exception.NotFound
return image
- def deregister(self, context, image_id):
- """ unregister an image """
- self._conn(context).make_request(
- method='DELETE',
- bucket='_images',
- query_args=self._qs({'image_id': image_id}))
+ def show_by_name(self, context, name):
+ image = self.service.show_by_name(context, name)
+ if not self._is_visible(context, image):
+ raise exception.NotFound
+ return image
- def _conn(self, context):
+ @staticmethod
+ def _conn(context):
+ # TODO(vish): is there a better way to get creds to sign
+ # for the user?
access = manager.AuthManager().get_access_key(context.user,
context.project)
secret = str(context.user.secret)
@@ -120,8 +120,159 @@ class S3ImageService(service.BaseImageService):
port=FLAGS.s3_port,
host=FLAGS.s3_host)
- def _qs(self, params):
- pairs = []
- for key in params.keys():
- pairs.append(key + '=' + urllib.quote(params[key]))
- return '&'.join(pairs)
+ @staticmethod
+ def _download_file(bucket, filename, local_dir):
+ key = bucket.get_key(filename)
+ local_filename = os.path.join(local_dir, filename)
+ key.get_contents_to_filename(local_filename)
+ return local_filename
+
+ def _s3_create(self, context, metadata):
+ """Gets a manifest from S3 and makes an image."""
+
+ image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
+
+ image_location = metadata['properties']['image_location']
+ bucket_name = image_location.split("/")[0]
+ manifest_path = image_location[len(bucket_name) + 1:]
+ bucket = self._conn(context).get_bucket(bucket_name)
+ key = bucket.get_key(manifest_path)
+ manifest = key.get_contents_as_string()
+
+ manifest = ElementTree.fromstring(manifest)
+ image_format = 'ami'
+ image_type = 'machine'
+
+ try:
+ kernel_id = manifest.find("machine_configuration/kernel_id").text
+ if kernel_id == 'true':
+ image_format = 'aki'
+ image_type = 'kernel'
+ kernel_id = None
+ except Exception:
+ kernel_id = None
+
+ try:
+ ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
+ if ramdisk_id == 'true':
+ image_format = 'ari'
+ image_type = 'ramdisk'
+ ramdisk_id = None
+ except Exception:
+ ramdisk_id = None
+
+ try:
+ arch = manifest.find("machine_configuration/architecture").text
+ except Exception:
+ arch = 'x86_64'
+
+ properties = metadata['properties']
+ properties['owner_id'] = context.project_id
+ properties['architecture'] = arch
+
+ if kernel_id:
+ properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
+
+ if ramdisk_id:
+ properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
+
+ properties['is_public'] = False
+ properties['type'] = image_type
+ metadata.update({'disk_format': image_format,
+ 'container_format': image_format,
+ 'status': 'queued',
+ 'is_public': True,
+ 'properties': properties})
+ metadata['properties']['image_state'] = 'pending'
+ image = self.service.create(context, metadata)
+ image_id = image['id']
+
+ def delayed_create():
+ """This handles the fetching and decrypting of the part files."""
+ parts = []
+ for fn_element in manifest.find("image").getiterator("filename"):
+ part = self._download_file(bucket, fn_element.text, image_path)
+ parts.append(part)
+
+ # NOTE(vish): this may be suboptimal, should we use cat?
+ encrypted_filename = os.path.join(image_path, 'image.encrypted')
+ with open(encrypted_filename, 'w') as combined:
+ for filename in parts:
+ with open(filename) as part:
+ shutil.copyfileobj(part, combined)
+
+ metadata['properties']['image_state'] = 'decrypting'
+ self.service.update(context, image_id, metadata)
+
+ hex_key = manifest.find("image/ec2_encrypted_key").text
+ encrypted_key = binascii.a2b_hex(hex_key)
+ hex_iv = manifest.find("image/ec2_encrypted_iv").text
+ encrypted_iv = binascii.a2b_hex(hex_iv)
+
+ # FIXME(vish): grab key from common service so this can run on
+ # any host.
+ cloud_pk = crypto.key_path(context.project_id)
+
+ decrypted_filename = os.path.join(image_path, 'image.tar.gz')
+ self._decrypt_image(encrypted_filename, encrypted_key,
+ encrypted_iv, cloud_pk, decrypted_filename)
+
+ metadata['properties']['image_state'] = 'untarring'
+ self.service.update(context, image_id, metadata)
+
+ unz_filename = self._untarzip_image(image_path, decrypted_filename)
+
+ metadata['properties']['image_state'] = 'uploading'
+ with open(unz_filename) as image_file:
+ self.service.update(context, image_id, metadata, image_file)
+ metadata['properties']['image_state'] = 'available'
+ self.service.update(context, image_id, metadata)
+
+ shutil.rmtree(image_path)
+
+ eventlet.spawn_n(delayed_create)
+
+ return image
+
+ @staticmethod
+ def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
+ cloud_private_key, decrypted_filename):
+ key, err = utils.execute('openssl',
+ 'rsautl',
+ '-decrypt',
+ '-inkey', '%s' % cloud_private_key,
+ process_input=encrypted_key,
+ check_exit_code=False)
+ if err:
+ raise exception.Error(_("Failed to decrypt private key: %s")
+ % err)
+ iv, err = utils.execute('openssl',
+ 'rsautl',
+ '-decrypt',
+ '-inkey', '%s' % cloud_private_key,
+ process_input=encrypted_iv,
+ check_exit_code=False)
+ if err:
+ raise exception.Error(_("Failed to decrypt initialization "
+ "vector: %s") % err)
+
+ _out, err = utils.execute('openssl', 'enc',
+ '-d', '-aes-128-cbc',
+ '-in', '%s' % (encrypted_filename,),
+ '-K', '%s' % (key,),
+ '-iv', '%s' % (iv,),
+ '-out', '%s' % (decrypted_filename,),
+ check_exit_code=False)
+ if err:
+ raise exception.Error(_("Failed to decrypt image file "
+ "%(image_file)s: %(err)s") %
+ {'image_file': encrypted_filename,
+ 'err': err})
+
+ @staticmethod
+ def _untarzip_image(path, filename):
+ tar_file = tarfile.open(filename, "r|gz")
+ tar_file.extractall(path)
+ image_file = tar_file.getnames()[0]
+ tar_file.close()
+ return os.path.join(path, image_file)
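_s3_create pulls the kernel id, ramdisk id and architecture out of the bundle manifest with ElementTree. A standalone sketch against a trimmed, made-up manifest using the same element paths:

from xml.etree import ElementTree

manifest_xml = """
<manifest>
  <machine_configuration>
    <kernel_id>aki-00000001</kernel_id>
    <ramdisk_id>ari-00000002</ramdisk_id>
    <architecture>x86_64</architecture>
  </machine_configuration>
</manifest>
"""

manifest = ElementTree.fromstring(manifest_xml)


def find_text(path, default=None):
    element = manifest.find(path)
    return element.text if element is not None else default


print(find_text('machine_configuration/kernel_id'))               # aki-00000001
print(find_text('machine_configuration/ramdisk_id'))              # ari-00000002
print(find_text('machine_configuration/architecture', 'x86_64'))  # x86_64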
diff --git a/nova/image/service.py b/nova/image/service.py
index ebee2228d..b9897ecae 100644
--- a/nova/image/service.py
+++ b/nova/image/service.py
@@ -16,9 +16,33 @@
# under the License.
+from nova import utils
+
+
class BaseImageService(object):
+ """Base class for providing image search and retrieval services
+
+ ImageService exposes two concepts of metadata:
+
+ 1. First-class attributes: This is metadata that is common to all
+ ImageService subclasses and is shared across all hypervisors. These
+ attributes are defined by BASE_IMAGE_ATTRS.
+
+ 2. Properties: This is metadata that is specific to an ImageService,
+ an Image, or a particular hypervisor. Any attribute not present in
+ BASE_IMAGE_ATTRS should be considered an image property.
- """Base class for providing image search and retrieval services"""
+ This means that ImageServices will return BASE_IMAGE_ATTRS as keys in the
+ metadata dict, all other attributes will be returned as keys in the nested
+ 'properties' dict.
+ """
+ BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at',
+ 'deleted_at', 'deleted', 'status', 'is_public']
+
+ # NOTE(sirp): ImageService subclasses may override this to aid translation
+ # between BaseImageService attributes and additional metadata stored by
+ # the ImageService subclass
+ SERVICE_IMAGE_ATTRS = []
def index(self, context):
"""
@@ -40,9 +64,9 @@ class BaseImageService(object):
:retval: a sequence of mappings with the following signature
{'id': opaque id of image,
'name': name of image,
- 'created_at': creation timestamp,
- 'updated_at': modification timestamp,
- 'deleted_at': deletion timestamp or None,
+ 'created_at': creation datetime object,
+ 'updated_at': modification datetime object,
+ 'deleted_at': deletion datetime object or None,
'deleted': boolean indicating if image has been deleted,
'status': string description of image status,
'is_public': boolean indicating if image is public
@@ -56,17 +80,17 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def show(self, context, id):
+ def show(self, context, image_id):
"""
- Returns a dict containing image data for the given opaque image id.
+ Returns a dict containing image metadata for the given opaque image id.
:retval a mapping with the following signature:
{'id': opaque id of image,
'name': name of image,
- 'created_at': creation timestamp,
- 'updated_at': modification timestamp,
- 'deleted_at': deletion timestamp or None,
+ 'created_at': creation datetime object,
+ 'updated_at': modification datetime object,
+ 'deleted_at': deletion datetime object or None,
'deleted': boolean indicating if image has been deleted,
'status': string description of image status,
'is_public': boolean indicating if image is public
@@ -76,17 +100,27 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def create(self, context, data):
+ def get(self, context, image_id, data):
+ """
+ Returns a dict containing image metadata and writes image data to data.
+
+ :param data: a file-like object to hold binary image data
+
+ :raises NotFound if the image does not exist
+ """
+ raise NotImplementedError
+
+ def create(self, context, metadata, data=None):
"""
- Store the image data and return the new image id.
+ Store the image metadata and data and return the new image metadata.
:raises AlreadyExists if the image already exist.
"""
raise NotImplementedError
- def update(self, context, image_id, data):
- """Replace the contents of the given image with the new data.
+ def update(self, context, image_id, metadata, data=None):
+ """Update the given image metadata and data and return the metadata
:raises NotFound if the image does not exist.
@@ -101,3 +135,38 @@ class BaseImageService(object):
"""
raise NotImplementedError
+
+ @classmethod
+ def _translate_to_base(cls, metadata):
+ """Return a metadata dictionary that is BaseImageService compliant.
+
+ This is used by subclasses to expose only a metadata dictionary that
+ is the same across ImageService implementations.
+ """
+ return cls._propertify_metadata(metadata, cls.BASE_IMAGE_ATTRS)
+
+ @classmethod
+ def _translate_to_service(cls, metadata):
+ """Return a metadata dictionary that is usable by the ImageService
+ subclass.
+
+ As an example, Glance has additional attributes (like 'location'); the
+ BaseImageService considers these properties, but we need to translate
+ these back to first-class attrs for sending to Glance. This method
+ handles this by allowing you to specify the attributes an ImageService
+ considers first-class.
+ """
+ if not cls.SERVICE_IMAGE_ATTRS:
+ raise NotImplementedError(_("Cannot use this without specifying "
+ "SERVICE_IMAGE_ATTRS for subclass"))
+ return cls._propertify_metadata(metadata, cls.SERVICE_IMAGE_ATTRS)
+
+ @staticmethod
+ def _propertify_metadata(metadata, keys):
+ """Return a dict with any unrecognized keys placed in the nested
+ 'properties' dict.
+ """
+ flattened = utils.flatten_dict(metadata)
+ attributes, properties = utils.partition_dict(flattened, keys)
+ attributes['properties'] = properties
+ return attributes
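The partitioning helper keeps the recognized attribute names first-class and pushes everything else under a nested 'properties' dict. A standalone sketch of the effect (utils.flatten_dict/partition_dict are not shown in this hunk, so the split is reimplemented inline):

BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at',
                    'deleted_at', 'deleted', 'status', 'is_public']


def propertify(metadata, keys=BASE_IMAGE_ATTRS):
    # Same idea as _propertify_metadata(): recognized keys stay first-class,
    # everything else becomes an entry in the nested 'properties' dict.
    attributes = dict((k, v) for k, v in metadata.items() if k in keys)
    properties = dict((k, v) for k, v in metadata.items() if k not in keys)
    attributes['properties'] = properties
    return attributes


glance_meta = {'id': 42, 'name': 'lucid', 'status': 'active',
               'location': 'file:///tmp/lucid.img', 'disk_format': 'ami'}
print(propertify(glance_meta))
# id/name/status stay top-level; location and disk_format land in 'properties'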
diff --git a/nova/log.py b/nova/log.py
index 87a21ddb4..d194ab8f0 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -266,7 +266,10 @@ class NovaRootLogger(NovaLogger):
def handle_exception(type, value, tb):
- logging.root.critical(str(value), exc_info=(type, value, tb))
+ extra = {}
+ if FLAGS.verbose:
+ extra['exc_info'] = (type, value, tb)
+ logging.root.critical(str(value), **extra)
def reset():
diff --git a/nova/manager.py b/nova/manager.py
index 3d38504bd..804a50479 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -53,11 +53,14 @@ This module provides Manager, a base class for managers.
from nova import utils
from nova import flags
+from nova import log as logging
from nova.db import base
-
+from nova.scheduler import api
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.manager')
+
class Manager(base.Base):
def __init__(self, host=None, db_driver=None):
@@ -74,3 +77,29 @@ class Manager(base.Base):
"""Do any initialization that needs to be run if this is a standalone
service. Child classes should override this method."""
pass
+
+
+class SchedulerDependentManager(Manager):
+ """Periodically send capability updates to the Scheduler services.
+ Services that need to notify the Scheduler of their capabilities
+ should derive from this class. Otherwise they can derive from
+ manager.Manager directly. Updates are only sent after
+ update_service_capabilities is called with non-None values."""
+
+ def __init__(self, host=None, db_driver=None, service_name="undefined"):
+ self.last_capabilities = None
+ self.service_name = service_name
+ super(SchedulerDependentManager, self).__init__(host, db_driver)
+
+ def update_service_capabilities(self, capabilities):
+ """Remember these capabilities to send on next periodic update."""
+ self.last_capabilities = capabilities
+
+ def periodic_tasks(self, context=None):
+ """Pass data back to the scheduler at a periodic interval"""
+ if self.last_capabilities:
+ LOG.debug(_("Notifying Schedulers of capabilities ..."))
+ api.update_service_capabilities(context, self.service_name,
+ self.host, self.last_capabilities)
+
+ super(SchedulerDependentManager, self).periodic_tasks(context)
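The capability plumbing is simply "remember the latest values, forward them on the next periodic tick". A standalone sketch of that behaviour with the scheduler RPC stubbed out by a callback:

class CapabilityReporter(object):
    """Toy version of SchedulerDependentManager's capability plumbing."""

    def __init__(self, service_name, host, publish):
        self.service_name = service_name
        self.host = host
        self.publish = publish     # stands in for scheduler api.update_service_capabilities
        self.last_capabilities = None

    def update_service_capabilities(self, capabilities):
        # Only remembered here; nothing is sent until the periodic task fires.
        self.last_capabilities = capabilities

    def periodic_tasks(self, context=None):
        if self.last_capabilities:
            self.publish(context, self.service_name, self.host,
                         self.last_capabilities)


sent = []
reporter = CapabilityReporter('compute', 'host1',
                              lambda *args: sent.append(args))
reporter.periodic_tasks()                                   # nothing sent yet
reporter.update_service_capabilities({'vcpus': 16, 'free_ram_mb': 2048})
reporter.periodic_tasks()
print(sent)   # [(None, 'compute', 'host1', {'vcpus': 16, 'free_ram_mb': 2048})]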
diff --git a/nova/network/api.py b/nova/network/api.py
index bf43acb51..c56e3062b 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -21,6 +21,7 @@ Handles all requests relating to instances (guest vms).
"""
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
@@ -65,6 +66,21 @@ class API(base.Base):
if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
+ # Check if the floating ip address is allocated
+ if floating_ip['project_id'] is None:
+ raise exception.ApiError(_("Address (%s) is not allocated") %
+ floating_ip['address'])
+ # Check if the floating ip address is allocated to the same project
+ if floating_ip['project_id'] != context.project_id:
+ LOG.warn(_("Address (%(address)s) is not allocated to your "
+ "project (%(project)s)"),
+ {'address': floating_ip['address'],
+ 'project': context.project_id})
+ raise exception.ApiError(_("Address (%(address)s) is not "
+ "allocated to your project "
+ "(%(project)s)") %
+ {'address': floating_ip['address'],
+ 'project': context.project_id})
# NOTE(vish): Perhaps we should just pass this on to compute and
# let compute communicate with network.
host = fixed_ip['network']['host']
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 535ce87bc..d11d21dad 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -1,3 +1,5 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@@ -17,7 +19,9 @@
Implements vlans, bridges, and iptables rules using linux utilities.
"""
+import inspect
import os
+import calendar
from nova import db
from nova import exception
@@ -25,7 +29,6 @@ from nova import flags
from nova import log as logging
from nova import utils
-
LOG = logging.getLogger("nova.linux_net")
@@ -41,21 +44,18 @@ flags.DEFINE_string('dhcpbridge_flagfile',
flags.DEFINE_string('dhcp_domain',
'novalocal',
'domain to use for building the hostnames')
-
flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files')
flags.DEFINE_string('public_interface', 'eth0',
'Interface for public IP addresses')
-flags.DEFINE_string('vlan_interface', 'eth0',
- 'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge')
flags.DEFINE_string('routing_source_ip', '$my_ip',
'Public IP of network host')
-flags.DEFINE_bool('use_nova_chains', False,
- 'use the nova_ routing chains instead of default')
flags.DEFINE_string('input_chain', 'INPUT',
'chain to add nova_input to')
+flags.DEFINE_integer('dhcp_lease_time', 120,
+ 'Lifetime of a DHCP lease')
flags.DEFINE_string('dns_server', None,
'if set, uses specific dns server for dnsmasq')
@@ -63,115 +63,366 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
'dmz range that should be accepted')
+binary_name = os.path.basename(inspect.stack()[-1][1])
+
+
+class IptablesRule(object):
+ """An iptables rule
+
+ You shouldn't need to use this class directly; it's only used by
+ IptablesManager.
+ """
+ def __init__(self, chain, rule, wrap=True, top=False):
+ self.chain = chain
+ self.rule = rule
+ self.wrap = wrap
+ self.top = top
+
+ def __eq__(self, other):
+ return ((self.chain == other.chain) and
+ (self.rule == other.rule) and
+ (self.top == other.top) and
+ (self.wrap == other.wrap))
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __str__(self):
+ if self.wrap:
+ chain = '%s-%s' % (binary_name, self.chain)
+ else:
+ chain = self.chain
+ return '-A %s %s' % (chain, self.rule)
+
+
+class IptablesTable(object):
+ """An iptables table"""
+
+ def __init__(self):
+ self.rules = []
+ self.chains = set()
+ self.unwrapped_chains = set()
+
+ def add_chain(self, name, wrap=True):
+ """Adds a named chain to the table
+
+ The chain name is wrapped to be unique for the component creating
+ it, so different components of Nova can safely create identically
+ named chains without interfering with one another.
+
+ At the moment, its wrapped name is <binary name>-<chain name>,
+ so if nova-compute creates a chain named "OUTPUT", it'll actually
+ end up named "nova-compute-OUTPUT".
+ """
+ if wrap:
+ self.chains.add(name)
+ else:
+ self.unwrapped_chains.add(name)
+
+ def remove_chain(self, name, wrap=True):
+ """Remove named chain
+
+ This removal "cascades". All rules in the chain are removed, as are
+ all rules in other chains that jump to it.
+
+ If the chain is not found, this is merely logged.
+ """
+ if wrap:
+ chain_set = self.chains
+ else:
+ chain_set = self.unwrapped_chains
+
+ if name not in chain_set:
+ LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
+ name)
+ return
+
+ chain_set.remove(name)
+ self.rules = filter(lambda r: r.chain != name, self.rules)
+
+ if wrap:
+ jump_snippet = '-j %s-%s' % (binary_name, name)
+ else:
+ jump_snippet = '-j %s' % (name,)
+
+ self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
+
+ def add_rule(self, chain, rule, wrap=True, top=False):
+ """Add a rule to the table
+
+ This is just like what you'd feed to iptables, just without
+ the "-A <chain name>" bit at the start.
+
+ However, if you need to jump to one of your wrapped chains,
+ prepend its name with a '$' which will ensure the wrapping
+ is applied correctly.
+ """
+ if wrap and chain not in self.chains:
+ raise ValueError(_("Unknown chain: %r") % chain)
+
+ if '$' in rule:
+ rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
+
+ self.rules.append(IptablesRule(chain, rule, wrap, top))
+
+ def _wrap_target_chain(self, s):
+ if s.startswith('$'):
+ return '%s-%s' % (binary_name, s[1:])
+ return s
+
+ def remove_rule(self, chain, rule, wrap=True, top=False):
+ """Remove a rule from a chain
+
+ Note: The rule must be exactly identical to the one that was added.
+ You cannot switch arguments around like you can with the iptables
+ CLI tool.
+ """
+ try:
+ self.rules.remove(IptablesRule(chain, rule, wrap, top))
+ except ValueError:
+ LOG.debug(_("Tried to remove rule that wasn't there:"
+ " %(chain)r %(rule)r %(wrap)r %(top)r"),
+ {'chain': chain, 'rule': rule,
+ 'top': top, 'wrap': wrap})
+
+
+class IptablesManager(object):
+ """Wrapper for iptables
+
+ See IptablesTable for some usage docs
+
+ A number of chains are set up to begin with.
+
+ First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
+ name is not wrapped, so it's shared between the various nova workers. It's
+ intended for rules that need to live at the top of the FORWARD and OUTPUT
+ chains. It's in both the ipv4 and ipv6 sets of tables.
+
+ For ipv4 and ipv6, the builtin INPUT, OUTPUT, and FORWARD filter chains are
+ wrapped, meaning that the "real" INPUT chain has a rule that jumps to the
+ wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
+ "local" which is jumped to from nova-filter-top.
+
+ For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
+ wrapped in the same way as the builtin filter chains. Additionally, there's
+ a snat chain that is applied after the POSTROUTING chain.
+ """
+ def __init__(self, execute=None):
+ if not execute:
+ self.execute = _execute
+ else:
+ self.execute = execute
+
+ self.ipv4 = {'filter': IptablesTable(),
+ 'nat': IptablesTable()}
+ self.ipv6 = {'filter': IptablesTable()}
+
+ # Add a nova-filter-top chain. It's intended to be shared
+ # among the various nova components. It sits at the very top
+ # of FORWARD and OUTPUT.
+ for tables in [self.ipv4, self.ipv6]:
+ tables['filter'].add_chain('nova-filter-top', wrap=False)
+ tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
+ wrap=False, top=True)
+ tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
+ wrap=False, top=True)
+
+ tables['filter'].add_chain('local')
+ tables['filter'].add_rule('nova-filter-top', '-j $local',
+ wrap=False)
+
+ # Wrap the builtin chains
+ builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
+ 'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
+ 6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
+
+ for ip_version in builtin_chains:
+ if ip_version == 4:
+ tables = self.ipv4
+ elif ip_version == 6:
+ tables = self.ipv6
+
+ for table, chains in builtin_chains[ip_version].iteritems():
+ for chain in chains:
+ tables[table].add_chain(chain)
+ tables[table].add_rule(chain, '-j $%s' % (chain,),
+ wrap=False)
+
+ # Add a nova-postrouting-bottom chain. It's intended to be shared
+ # among the various nova components. It is jumped to as the last
+ # rule of the POSTROUTING chain.
+ self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
+ self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
+ wrap=False)
+
+ # We add a snat chain to the shared nova-postrouting-bottom chain
+ # so that it's applied last.
+ self.ipv4['nat'].add_chain('snat')
+ self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
+ wrap=False)
+
+ # And then we add a floating-snat chain and jump to it as the first
+ # rule in the snat chain.
+ self.ipv4['nat'].add_chain('floating-snat')
+ self.ipv4['nat'].add_rule('snat', '-j $floating-snat')
+
+ @utils.synchronized('iptables', external=True)
+ def apply(self):
+ """Apply the current in-memory set of iptables rules
+
+ This will blow away any rules left over from previous runs of the
+ same component of Nova, and replace them with our current set of
+ rules. This happens atomically, thanks to iptables-restore.
+ """
+ s = [('iptables', self.ipv4)]
+ if FLAGS.use_ipv6:
+ s += [('ip6tables', self.ipv6)]
+
+ for cmd, tables in s:
+ for table in tables:
+ current_table, _ = self.execute('sudo',
+ '%s-save' % (cmd,),
+ '-t', '%s' % (table,),
+ attempts=5)
+ current_lines = current_table.split('\n')
+ new_filter = self._modify_rules(current_lines,
+ tables[table])
+ self.execute('sudo', '%s-restore' % (cmd,),
+ process_input='\n'.join(new_filter),
+ attempts=5)
+
+ def _modify_rules(self, current_lines, table, binary=None):
+ unwrapped_chains = table.unwrapped_chains
+ chains = table.chains
+ rules = table.rules
+
+ # Remove any trace of our rules
+ new_filter = filter(lambda line: binary_name not in line,
+ current_lines)
+
+ seen_chains = False
+ rules_index = 0
+ for rules_index, rule in enumerate(new_filter):
+ if not seen_chains:
+ if rule.startswith(':'):
+ seen_chains = True
+ else:
+ if not rule.startswith(':'):
+ break
+
+ our_rules = []
+ for rule in rules:
+ rule_str = str(rule)
+ if rule.top:
+ # rule.top == True means we want this rule to be at the top.
+ # Further down, we weed out duplicates from the bottom of the
+ # list, so here we remove the dupes ahead of time.
+ new_filter = filter(lambda s: s.strip() != rule_str.strip(),
+ new_filter)
+ our_rules += [rule_str]
+
+ new_filter[rules_index:rules_index] = our_rules
+
+ new_filter[rules_index:rules_index] = [':%s - [0:0]' % \
+ (name,) \
+ for name in unwrapped_chains]
+ new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' % \
+ (binary_name, name,) \
+ for name in chains]
+
+ seen_lines = set()
+
+ def _weed_out_duplicates(line):
+ line = line.strip()
+ if line in seen_lines:
+ return False
+ else:
+ seen_lines.add(line)
+ return True
+
+ # We filter duplicates, letting the *last* occurrence take
+ # precedence.
+ new_filter.reverse()
+ new_filter = filter(_weed_out_duplicates, new_filter)
+ new_filter.reverse()
+ return new_filter
+
+
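# For reference, a usage sketch of the table API defined above (illustrative
# only; it assumes the patched nova.network.linux_net is importable in a
# configured Nova checkout, and that binary_name is derived from the running
# program's file name as in the module-level assignment above):
from nova.network import linux_net

table = linux_net.IptablesTable()
table.add_chain('sg-fallback')                    # stored as a wrapped chain
table.add_rule('sg-fallback', '-j DROP')          # rule body only, no '-A <chain>'
table.add_rule('FORWARD', '-j $sg-fallback', wrap=False)  # '$' expands the wrapped name

for rule in table.rules:
    print(str(rule))
# -A <binary_name>-sg-fallback -j DROP
# -A FORWARD -j <binary_name>-sg-fallback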
def metadata_forward():
"""Create forwarding rule for metadata"""
- _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
- "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
- "--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
+ iptables_manager.ipv4['nat'].add_rule("PREROUTING",
+ "-s 0.0.0.0/0 -d 169.254.169.254/32 "
+ "-p tcp -m tcp --dport 80 -j DNAT "
+ "--to-destination %s:%s" % \
+ (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
+ iptables_manager.apply()
def init_host():
"""Basic networking setup goes here"""
-
- if FLAGS.use_nova_chains:
- _execute("sudo iptables -N nova_input", check_exit_code=False)
- _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain,
- check_exit_code=False)
- _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain)
-
- _execute("sudo iptables -N nova_forward", check_exit_code=False)
- _execute("sudo iptables -D FORWARD -j nova_forward",
- check_exit_code=False)
- _execute("sudo iptables -A FORWARD -j nova_forward")
-
- _execute("sudo iptables -N nova_output", check_exit_code=False)
- _execute("sudo iptables -D OUTPUT -j nova_output",
- check_exit_code=False)
- _execute("sudo iptables -A OUTPUT -j nova_output")
-
- _execute("sudo iptables -t nat -N nova_prerouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting")
-
- _execute("sudo iptables -t nat -N nova_postrouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting")
-
- _execute("sudo iptables -t nat -N nova_snatting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting")
-
- _execute("sudo iptables -t nat -N nova_output", check_exit_code=False)
- _execute("sudo iptables -t nat -D OUTPUT -j nova_output",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A OUTPUT -j nova_output")
- else:
- # NOTE(vish): This makes it easy to ensure snatting rules always
- # come after the accept rules in the postrouting chain
- _execute("sudo iptables -t nat -N SNATTING",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING")
-
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
- _confirm_rule("SNATTING", "-t nat -s %s "
- "-j SNAT --to-source %s"
- % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True)
+ iptables_manager.ipv4['nat'].add_rule("snat",
+ "-s %s -j SNAT --to-source %s" % \
+ (FLAGS.fixed_range,
+ FLAGS.routing_source_ip))
+
+ iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
+ "-s %s -d %s -j ACCEPT" % \
+ (FLAGS.fixed_range, FLAGS.dmz_cidr))
- _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" %
- (FLAGS.fixed_range, FLAGS.dmz_cidr))
- _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
- {'range': FLAGS.fixed_range})
+ iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
+ "-s %(range)s -d %(range)s "
+ "-j ACCEPT" % \
+ {'range': FLAGS.fixed_range})
+ iptables_manager.apply()
def bind_floating_ip(floating_ip, check_exit_code=True):
"""Bind ip to public interface"""
- _execute("sudo ip addr add %s dev %s" % (floating_ip,
- FLAGS.public_interface),
+ _execute('sudo', 'ip', 'addr', 'add', floating_ip,
+ 'dev', FLAGS.public_interface,
check_exit_code=check_exit_code)
def unbind_floating_ip(floating_ip):
"""Unbind a public ip from public interface"""
- _execute("sudo ip addr del %s dev %s" % (floating_ip,
- FLAGS.public_interface))
+ _execute('sudo', 'ip', 'addr', 'del', floating_ip,
+ 'dev', FLAGS.public_interface)
def ensure_vlan_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan"""
- _confirm_rule("FORWARD", "-d %s -p udp --dport 1194 -j ACCEPT" %
- private_ip)
- _confirm_rule("PREROUTING",
- "-t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
- % (public_ip, port, private_ip))
+ iptables_manager.ipv4['filter'].add_rule("FORWARD",
+ "-d %s -p udp "
+ "--dport 1194 "
+ "-j ACCEPT" % private_ip)
+ iptables_manager.ipv4['nat'].add_rule("PREROUTING",
+ "-d %s -p udp "
+ "--dport %s -j DNAT --to %s:1194" %
+ (public_ip, port, private_ip))
+ iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule"""
- _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
- % (fixed_ip, floating_ip))
+ for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
+ iptables_manager.ipv4['nat'].add_rule(chain, rule)
+ iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip"""
- _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
- % (fixed_ip, floating_ip))
+ for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
+ iptables_manager.ipv4['nat'].remove_rule(chain, rule)
+ iptables_manager.apply()
+
+
+def floating_forward_rules(floating_ip, fixed_ip):
+ return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
+ ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
+ ("floating-snat",
+ "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))]
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
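The pattern in this hunk replaces the old delete-and-re-add _confirm_rule() calls: rules are queued on a chain of the relevant table and only written out when apply() is called. A minimal sketch of that lifecycle, using only the calls visible above (the rule text itself is illustrative):

    my_rule = "-d 10.0.0.5 -j DNAT --to 192.168.0.5"
    iptables_manager.ipv4['nat'].add_rule("PREROUTING", my_rule)
    iptables_manager.apply()        # nothing reaches iptables until apply()
    iptables_manager.ipv4['nat'].remove_rule("PREROUTING", my_rule)
    iptables_manager.apply()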
@@ -185,9 +436,9 @@ def ensure_vlan(vlan_num):
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
LOG.debug(_("Starting VLAN inteface %s"), interface)
- _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
- _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
- _execute("sudo ip link set %s up" % interface)
+ _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
+ _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
+ _execute('sudo', 'ip', 'link', 'set', interface, 'up')
return interface
@@ -206,75 +457,80 @@ def ensure_bridge(bridge, interface, net_attrs=None):
"""
if not _device_exists(bridge):
LOG.debug(_("Starting Bridge interface for %s"), interface)
- _execute("sudo brctl addbr %s" % bridge)
- _execute("sudo brctl setfd %s 0" % bridge)
+ _execute('sudo', 'brctl', 'addbr', bridge)
+ _execute('sudo', 'brctl', 'setfd', bridge, 0)
# _execute("sudo brctl setageing %s 10" % bridge)
- _execute("sudo brctl stp %s off" % bridge)
- _execute("sudo ip link set %s up" % bridge)
+ _execute('sudo', 'brctl', 'stp', bridge, 'off')
+ _execute('sudo', 'ip', 'link', 'set', bridge, 'up')
if net_attrs:
# NOTE(vish): The ip for dnsmasq has to be the first address on the
# bridge for it to respond to requests properly
suffix = net_attrs['cidr'].rpartition('/')[2]
- out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
- (net_attrs['gateway'],
- suffix,
- net_attrs['broadcast'],
- bridge),
+ out, err = _execute('sudo', 'ip', 'addr', 'add',
+ "%s/%s" %
+ (net_attrs['gateway'], suffix),
+ 'brd',
+ net_attrs['broadcast'],
+ 'dev',
+ bridge,
check_exit_code=False)
if err and err != "RTNETLINK answers: File exists\n":
raise exception.Error("Failed to add ip: %s" % err)
if(FLAGS.use_ipv6):
- _execute("sudo ip -f inet6 addr change %s dev %s" %
- (net_attrs['cidr_v6'], bridge))
+ _execute('sudo', 'ip', '-f', 'inet6', 'addr',
+ 'change', net_attrs['cidr_v6'],
+ 'dev', bridge)
# NOTE(vish): If the public interface is the same as the
# bridge, then the bridge has to be in promiscuous
# to forward packets properly.
if(FLAGS.public_interface == bridge):
- _execute("sudo ip link set dev %s promisc on" % bridge)
+ _execute('sudo', 'ip', 'link', 'set',
+ 'dev', bridge, 'promisc', 'on')
if interface:
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
gateway = None
- out, err = _execute("sudo route -n")
+ out, err = _execute('sudo', 'route', '-n')
for line in out.split("\n"):
fields = line.split()
if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
gateway = fields[1]
- out, err = _execute("sudo ip addr show dev %s scope global" %
- interface)
+ out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
+ 'scope', 'global')
for line in out.split("\n"):
fields = line.split()
if fields and fields[0] == "inet":
- params = ' '.join(fields[1:-1])
- _execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
- _execute("sudo ip addr add %s dev %s" % (params, bridge))
+ params = fields[1:-1]
+ _execute(*_ip_bridge_cmd('del', params, fields[-1]))
+ _execute(*_ip_bridge_cmd('add', params, bridge))
if gateway:
- _execute("sudo route add 0.0.0.0 gw %s" % gateway)
- out, err = _execute("sudo brctl addif %s %s" %
- (bridge, interface),
+ _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway)
+ out, err = _execute('sudo', 'brctl', 'addif', bridge, interface,
check_exit_code=False)
if (err and err != "device %s is already a member of a bridge; can't "
"enslave it to bridge %s.\n" % (interface, bridge)):
raise exception.Error("Failed to add interface: %s" % err)
- if FLAGS.use_nova_chains:
- (out, err) = _execute("sudo iptables -N nova_forward",
- check_exit_code=False)
- if err != 'iptables: Chain already exists.\n':
- # NOTE(vish): chain didn't exist link chain
- _execute("sudo iptables -D FORWARD -j nova_forward",
- check_exit_code=False)
- _execute("sudo iptables -A FORWARD -j nova_forward")
+ iptables_manager.ipv4['filter'].add_rule("FORWARD",
+ "--in-interface %s -j ACCEPT" % \
+ bridge)
+ iptables_manager.ipv4['filter'].add_rule("FORWARD",
+ "--out-interface %s -j ACCEPT" % \
+ bridge)
- _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
- _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
- _execute("sudo iptables -N nova-local", check_exit_code=False)
- _confirm_rule("FORWARD", "-j nova-local")
+
+def get_dhcp_leases(context, network_id):
+ """Return a network's hosts config in dnsmasq leasefile format"""
+ hosts = []
+ for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
+ network_id):
+ hosts.append(_host_lease(fixed_ip_ref))
+ return '\n'.join(hosts)
def get_dhcp_hosts(context, network_id):
- """Get a string containing a network's hosts config in dnsmasq format"""
+ """Get a string containing a network's hosts config in dhcp-host format"""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@@ -285,6 +541,7 @@ def get_dhcp_hosts(context, network_id):
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
# configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
+@utils.synchronized('dnsmasq_start')
def update_dhcp(context, network_id):
"""(Re)starts a dnsmasq server for a given network
@@ -304,13 +561,13 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
- out, _err = _execute('cat /proc/%d/cmdline' % pid,
+ out, _err = _execute('cat', "/proc/%d/cmdline" % pid,
check_exit_code=False)
if conffile in out:
try:
- _execute('sudo kill -HUP %d' % pid)
+ _execute('sudo', 'kill', '-HUP', pid)
return
- except Exception as exc: # pylint: disable-msg=W0703
+ except Exception as exc: # pylint: disable=W0703
LOG.debug(_("Hupping dnsmasq threw %s"), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
@@ -319,9 +576,10 @@ def update_dhcp(context, network_id):
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
'DNSMASQ_INTERFACE': network_ref['bridge']}
command = _dnsmasq_cmd(network_ref)
- _execute(command, addl_env=env)
+ _execute(*command, addl_env=env)
+@utils.synchronized('radvd_start')
def update_ra(context, network_id):
network_ref = db.network_get(context, network_id)
@@ -349,24 +607,40 @@ interface %s
# if radvd is already running, then tell it to reload
if pid:
- out, _err = _execute('cat /proc/%d/cmdline'
+ out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
- _execute('sudo kill %d' % pid)
- except Exception as exc: # pylint: disable-msg=W0703
+ _execute('sudo', 'kill', pid)
+ except Exception as exc: # pylint: disable=W0703
LOG.debug(_("killing radvd threw %s"), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
command = _ra_cmd(network_ref)
- _execute(command)
+ _execute(*command)
db.network_update(context, network_id,
- {"ra_server":
+ {"gateway_v6":
utils.get_my_linklocal(network_ref['bridge'])})
+def _host_lease(fixed_ip_ref):
+ """Return a host string for an address in leasefile format"""
+ instance_ref = fixed_ip_ref['instance']
+ if instance_ref['updated_at']:
+ timestamp = instance_ref['updated_at']
+ else:
+ timestamp = instance_ref['created_at']
+
+ seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
+
+ return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time,
+ instance_ref['mac_address'],
+ fixed_ip_ref['address'],
+ instance_ref['hostname'] or '*')
+
+
def _host_dhcp(fixed_ip_ref):
- """Return a host string for an address"""
+ """Return a host string for an address in dhcp-host format"""
instance_ref = fixed_ip_ref['instance']
return "%s,%s.%s,%s" % (instance_ref['mac_address'],
instance_ref['hostname'],
@@ -374,68 +648,48 @@ def _host_dhcp(fixed_ip_ref):
fixed_ip_ref['address'])
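To make the two formats concrete, a single fixed ip renders roughly as follows; the values are made up, and the domain suffix assumes the dhcp_domain flag referenced by _dnsmasq_cmd below (shown here as novalocal):

    # _host_lease() -> one dnsmasq leasefile line:
    #   1300000120 02:16:3e:00:00:01 10.0.0.5 myhost *
    # _host_dhcp() -> one dnsmasq dhcp-host line:
    #   02:16:3e:00:00:01,myhost.novalocal,10.0.0.5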
-def _execute(cmd, *args, **kwargs):
+def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network"""
if FLAGS.fake_network:
- LOG.debug("FAKE NET: %s", cmd)
+ LOG.debug("FAKE NET: %s", " ".join(map(str, cmd)))
return "fake", 0
else:
- return utils.execute(cmd, *args, **kwargs)
+ return utils.execute(*cmd, **kwargs)
def _device_exists(device):
"""Check if ethernet device exists"""
- (_out, err) = _execute("ip link show dev %s" % device,
+ (_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False)
return not err
-def _confirm_rule(chain, cmd, append=False):
- """Delete and re-add iptables rule"""
- if FLAGS.use_nova_chains:
- chain = "nova_%s" % chain.lower()
- if append:
- loc = "-A"
- else:
- loc = "-I"
- _execute("sudo iptables --delete %s %s" % (chain, cmd),
- check_exit_code=False)
- _execute("sudo iptables %s %s %s" % (loc, chain, cmd))
-
-
-def _remove_rule(chain, cmd):
- """Remove iptables rule"""
- if FLAGS.use_nova_chains:
- chain = "%s" % chain.lower()
- _execute("sudo iptables --delete %s %s" % (chain, cmd))
-
-
def _dnsmasq_cmd(net):
"""Builds dnsmasq command"""
- cmd = ['sudo -E dnsmasq',
- ' --strict-order',
- ' --bind-interfaces',
- ' --conf-file=',
- ' --domain=%s' % FLAGS.dhcp_domain,
- ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
- ' --listen-address=%s' % net['gateway'],
- ' --except-interface=lo',
- ' --dhcp-range=%s,static,120s' % net['dhcp_start'],
- ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
- ' --dhcp-script=%s' % FLAGS.dhcpbridge,
- ' --leasefile-ro']
+ cmd = ['sudo', '-E', 'dnsmasq',
+ '--strict-order',
+ '--bind-interfaces',
+ '--conf-file=',
+ '--domain=%s' % FLAGS.dhcp_domain,
+ '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
+ '--listen-address=%s' % net['gateway'],
+ '--except-interface=lo',
+ '--dhcp-range=%s,static,120s' % net['dhcp_start'],
+ '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
+ '--dhcp-script=%s' % FLAGS.dhcpbridge,
+ '--leasefile-ro']
if FLAGS.dns_server:
- cmd.append(' -h -R --server=%s' % FLAGS.dns_server)
- return ''.join(cmd)
+ cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
+ return cmd
def _ra_cmd(net):
"""Builds radvd command"""
- cmd = ['sudo -E radvd',
-# ' -u nobody',
- ' -C %s' % _ra_file(net['bridge'], 'conf'),
- ' -p %s' % _ra_file(net['bridge'], 'pid')]
- return ''.join(cmd)
+ cmd = ['sudo', '-E', 'radvd',
+# '-u', 'nobody',
+ '-C', '%s' % _ra_file(net['bridge'], 'conf'),
+ '-p', '%s' % _ra_file(net['bridge'], 'pid')]
+ return cmd
def _stop_dnsmasq(network):
@@ -444,8 +698,8 @@ def _stop_dnsmasq(network):
if pid:
try:
- _execute('sudo kill -TERM %d' % pid)
- except Exception as exc: # pylint: disable-msg=W0703
+ _execute('sudo', 'kill', '-TERM', pid)
+ except Exception as exc: # pylint: disable=W0703
LOG.debug(_("Killing dnsmasq threw %s"), exc)
@@ -497,3 +751,15 @@ def _ra_pid_for(bridge):
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
+
+
+def _ip_bridge_cmd(action, params, device):
+ """Build commands to add/del ips to bridges/devices"""
+
+ cmd = ['sudo', 'ip', 'addr', action]
+ cmd.extend(params)
+ cmd.extend(['dev', device])
+ return cmd
+
+
+iptables_manager = IptablesManager()
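As a concrete illustration of the helper just added, moving an address from an interface onto its bridge becomes two argv-style calls instead of interpolated shell strings (the address and device names are illustrative):

    params = ['10.0.0.1/24', 'brd', '10.0.0.255']    # fields[1:-1] from 'ip addr show'
    _execute(*_ip_bridge_cmd('del', params, 'eth0'))
    # -> ['sudo', 'ip', 'addr', 'del', '10.0.0.1/24', 'brd', '10.0.0.255', 'dev', 'eth0']
    _execute(*_ip_bridge_cmd('add', params, 'br100'))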
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 12a0c5018..86ee4fc00 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -73,7 +73,9 @@ flags.DEFINE_string('flat_interface', None,
flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2',
'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
-flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
+flags.DEFINE_string('vlan_interface', 'eth0',
+ 'network device for vlans')
+flags.DEFINE_integer('num_networks', 1, 'Number of networks to support')
flags.DEFINE_string('vpn_ip', '$my_ip',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
@@ -105,7 +107,7 @@ class AddressAlreadyAllocated(exception.Error):
pass
-class NetworkManager(manager.Manager):
+class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
@@ -116,7 +118,8 @@ class NetworkManager(manager.Manager):
if not network_driver:
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
- super(NetworkManager, self).__init__(*args, **kwargs)
+ super(NetworkManager, self).__init__(service_name='network',
+ *args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -163,11 +166,22 @@ class NetworkManager(manager.Manager):
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
- raise NotImplementedError()
+ # TODO(vish): when this is called by compute, we can associate compute
+ # with a network, or a cluster of computes with a network
+ # and use that network here with a method like
+ # network_get_by_compute_host
+ network_ref = self.db.network_get_by_bridge(context.elevated(),
+ FLAGS.flat_network_bridge)
+ address = self.db.fixed_ip_associate_pool(context.elevated(),
+ network_ref['id'],
+ instance_id)
+ self.db.fixed_ip_update(context, address, {'allocated': True})
+ return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
"""Returns a fixed ip to the pool."""
- raise NotImplementedError()
+ self.db.fixed_ip_update(context, address, {'allocated': False})
+ self.db.fixed_ip_disassociate(context.elevated(), address)
def setup_fixed_ip(self, context, address):
"""Sets up rules for fixed ip."""
@@ -257,20 +271,72 @@ class NetworkManager(manager.Manager):
def get_network_host(self, context):
"""Get the network host for the current context."""
- raise NotImplementedError()
+ network_ref = self.db.network_get_by_bridge(context,
+ FLAGS.flat_network_bridge)
+ # NOTE(vish): If the network has no host, use the network_host flag.
+ # This could eventually be a db lookup of some sort, but
+ # a flag is easy to handle for now.
+ host = network_ref['host']
+ if not host:
+ topic = self.db.queue_get_for(context,
+ FLAGS.network_topic,
+ FLAGS.network_host)
+ if FLAGS.fake_call:
+ return self.set_network_host(context, network_ref['id'])
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return host
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, *args, **kwargs):
+ cidr_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
- raise NotImplementedError()
+ fixed_net = IPy.IP(cidr)
+ fixed_net_v6 = IPy.IP(cidr_v6)
+ significant_bits_v6 = 64
+ network_size_v6 = 1 << 64
+ count = 1
+ for index in range(num_networks):
+ start = index * network_size
+ start_v6 = index * network_size_v6
+ significant_bits = 32 - int(math.log(network_size, 2))
+ cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ project_net = IPy.IP(cidr)
+ net = {}
+ net['bridge'] = FLAGS.flat_network_bridge
+ net['dns'] = FLAGS.flat_network_dns
+ net['cidr'] = cidr
+ net['netmask'] = str(project_net.netmask())
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast())
+ net['dhcp_start'] = str(project_net[2])
+ if num_networks > 1:
+ net['label'] = "%s_%d" % (label, count)
+ else:
+ net['label'] = label
+ count += 1
+
+ if(FLAGS.use_ipv6):
+ cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
+ significant_bits_v6)
+ net['cidr_v6'] = cidr_v6
+ project_net_v6 = IPy.IP(cidr_v6)
+ net['gateway_v6'] = str(project_net_v6[1])
+ net['netmask_v6'] = str(project_net_v6.prefixlen())
+
+ network_ref = self.db.network_create_safe(context, net)
+
+ if network_ref:
+ self._create_fixed_ips(context, network_ref['id'])
@property
- def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
+ def _bottom_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
- def _top_reserved_ips(self): # pylint: disable-msg=R0201
+ def _top_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the top of the range."""
return 1 # broadcast
@@ -332,84 +398,10 @@ class FlatManager(NetworkManager):
for network in self.db.host_get_networks(ctxt, self.host):
self._on_set_network_host(ctxt, network['id'])
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool."""
- # TODO(vish): when this is called by compute, we can associate compute
- # with a network, or a cluster of computes with a network
- # and use that network here with a method like
- # network_get_by_compute_host
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- address = self.db.fixed_ip_associate_pool(context.elevated(),
- network_ref['id'],
- instance_id)
- self.db.fixed_ip_update(context, address, {'allocated': True})
- return address
-
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
- self.db.fixed_ip_disassociate(context.elevated(), address)
-
def setup_compute_network(self, context, instance_id):
"""Network is created manually."""
pass
- def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, label, *args, **kwargs):
- """Create networks based on parameters."""
- fixed_net = IPy.IP(cidr)
- fixed_net_v6 = IPy.IP(cidr_v6)
- significant_bits_v6 = 64
- count = 1
- for index in range(num_networks):
- start = index * network_size
- significant_bits = 32 - int(math.log(network_size, 2))
- cidr = "%s/%s" % (fixed_net[start], significant_bits)
- project_net = IPy.IP(cidr)
- net = {}
- net['bridge'] = FLAGS.flat_network_bridge
- net['dns'] = FLAGS.flat_network_dns
- net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['dhcp_start'] = str(project_net[2])
- if num_networks > 1:
- net['label'] = "%s_%d" % (label, count)
- else:
- net['label'] = label
- count += 1
-
- if(FLAGS.use_ipv6):
- cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
- net['cidr_v6'] = cidr_v6
-
- network_ref = self.db.network_create_safe(context, net)
-
- if network_ref:
- self._create_fixed_ips(context, network_ref['id'])
-
- def get_network_host(self, context):
- """Get the network host for the current context."""
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- # NOTE(vish): If the network has no host, use the network_host flag.
- # This could eventually be a a db lookup of some sort, but
- # a flag is easy to handle for now.
- host = network_ref['host']
- if not host:
- topic = self.db.queue_get_for(context,
- FLAGS.network_topic,
- FLAGS.network_host)
- if FLAGS.fake_call:
- return self.set_network_host(context, network_ref['id'])
- host = rpc.call(context,
- FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
- return host
-
def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a network."""
net = {}
@@ -434,7 +426,7 @@ class FlatManager(NetworkManager):
raise NotImplementedError()
-class FlatDHCPManager(FlatManager):
+class FlatDHCPManager(NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
@@ -548,6 +540,11 @@ class VlanManager(NetworkManager):
' than 4094'))
fixed_net = IPy.IP(cidr)
+ if fixed_net.len() < num_networks * network_size:
+ raise ValueError(_('The network range is not big enough to fit '
+ '%(num_networks)s networks. Network size is %(network_size)s' %
+ locals()))
+
fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64
significant_bits_v6 = 64
@@ -575,6 +572,16 @@ class VlanManager(NetworkManager):
# NOTE(vish): This makes ports unique across the cloud, a more
# robust solution would be to make them unique per ip
net['vpn_public_port'] = vpn_start + index
+ network_ref = None
+ try:
+ network_ref = db.network_get_by_cidr(context, cidr)
+ except exception.NotFound:
+ pass
+
+ if network_ref is not None:
+ raise ValueError(_('Network with cidr %s already exists' %
+ cidr))
+
network_ref = self.db.network_create_safe(context, net)
if network_ref:
self._create_fixed_ips(context, network_ref['id'])
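The subnet carving in create_networks() above is plain index arithmetic: network i starts at offset i * network_size inside the fixed range and gets a prefix of 32 - log2(network_size) bits, with the gateway and dhcp_start taken from offsets 1 and 2. A worked sketch with illustrative values in place of the flags:

    import math
    import IPy

    fixed_net = IPy.IP('10.0.0.0/8')                            # cidr argument
    network_size = 256                                          # hosts per network
    significant_bits = 32 - int(math.log(network_size, 2))      # -> 24
    for index in range(2):                                      # num_networks = 2
        start = index * network_size
        cidr = "%s/%s" % (fixed_net[start], significant_bits)   # 10.0.0.0/24, then 10.0.1.0/24
        project_net = IPy.IP(cidr)
        gateway = str(project_net[1])                           # 10.0.0.1 / 10.0.1.1
        dhcp_start = str(project_net[2])                        # 10.0.0.2 / 10.0.1.2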
diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py
new file mode 100644
index 000000000..93e6584f0
--- /dev/null
+++ b/nova/network/vmwareapi_net.py
@@ -0,0 +1,91 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Implements vlans for vmwareapi.
+"""
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.vmwareapi_conn import VMWareAPISession
+from nova.virt.vmwareapi import network_utils
+
+LOG = logging.getLogger("nova.network.vmwareapi_net")
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vlan_interface', 'vmnic0',
+ 'Physical network adapter name in VMware ESX host for '
+ 'vlan networking')
+
+
+def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+ """Create a vlan and bridge unless they already exist."""
+ # Open vmwareapi session
+ host_ip = FLAGS.vmwareapi_host_ip
+ host_username = FLAGS.vmwareapi_host_username
+ host_password = FLAGS.vmwareapi_host_password
+ if not host_ip or host_username is None or host_password is None:
+ raise Exception(_("Must specify vmwareapi_host_ip,"
+ "vmwareapi_host_username "
+ "and vmwareapi_host_password to use"
+ "connection_type=vmwareapi"))
+ session = VMWareAPISession(host_ip, host_username, host_password,
+ FLAGS.vmwareapi_api_retry_count)
+ vlan_interface = FLAGS.vlan_interface
+ # Check if the vlan_interface physical network adapter exists on the host
+ if not network_utils.check_if_vlan_interface_exists(session,
+ vlan_interface):
+ raise exception.NotFound(_("There is no physical network adapter with "
+ "the name %s on the ESX host") % vlan_interface)
+
+ # Get the vSwitch associated with the Physical Adapter
+ vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
+ session, vlan_interface)
+ if vswitch_associated is None:
+ raise exception.NotFound(_("There is no virtual switch associated "
+ "with the physical network adapter with name %s") %
+ vlan_interface)
+ # Check whether the bridge already exists and retrieve the ref of the
+ # network whose name_label is the bridge name
+ network_ref = network_utils.get_network_with_the_name(session, bridge)
+ if network_ref is None:
+ # Create a port group on the vSwitch associated with the physical
+ # network adapter corresponding to vlan_interface on the ESX host
+ network_utils.create_port_group(session, bridge, vswitch_associated,
+ vlan_num)
+ else:
+ # Get the vlan id and vswitch corresponding to the port group
+ pg_vlanid, pg_vswitch = \
+ network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge)
+
+ # Check that the associated vSwitch is the expected one
+ if pg_vswitch != vswitch_associated:
+ raise exception.Invalid(_("vSwitch which contains the port group "
+ "%(bridge)s is not associated with the desired "
+ "physical adapter. Expected vSwitch is "
+ "%(vswitch_associated)s, but the one associated"
+ " is %(pg_vswitch)s") % locals())
+
+ # Check if the vlan id is proper for the port group
+ if pg_vlanid != vlan_num:
+ raise exception.Invalid(_("VLAN tag is not appropriate for the "
+ "port group %(bridge)s. Expected VLAN tag is "
+ "%(vlan_num)s, but the one associated with the "
+ "port group is %(pg_vlanid)s") % locals())
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
new file mode 100644
index 000000000..9a99602d9
--- /dev/null
+++ b/nova/network/xenapi_net.py
@@ -0,0 +1,85 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Implements vlans and bridges using XenAPI.
+"""
+
+import os
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.xenapi_conn import XenAPISession
+from nova.virt.xenapi import network_utils
+
+LOG = logging.getLogger("nova.xenapi_net")
+
+FLAGS = flags.FLAGS
+
+
+def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+ """Create a vlan and bridge unless they already exist."""
+ # Open xenapi session
+ LOG.debug("ENTERING ensure_vlan_bridge in xenapi net")
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ session = XenAPISession(url, username, password)
+ # Check whether bridge already exists
+ # Retrieve network whose name_label is "bridge"
+ network_ref = network_utils.NetworkHelper.find_network_with_name_label(
+ session,
+ bridge)
+ if network_ref is None:
+ # If the bridge does not exist:
+ # 1 - create network
+ description = "network for nova bridge %s" % bridge
+ network_rec = {'name_label': bridge,
+ 'name_description': description,
+ 'other_config': {}}
+ network_ref = session.call_xenapi('network.create', network_rec)
+ # 2 - find PIF for VLAN
+ expr = 'field "device" = "%s" and \
+ field "VLAN" = "-1"' % FLAGS.vlan_interface
+ pifs = session.call_xenapi('PIF.get_all_records_where', expr)
+ pif_ref = None
+ # Multiple PIFs are OK: we are dealing with a pool
+ if len(pifs) == 0:
+ raise Exception(
+ _('Found no PIF for device %s') % FLAGS.vlan_interface)
+ # 3 - create vlan for network
+ for pif_ref in pifs.keys():
+ session.call_xenapi('VLAN.create',
+ pif_ref,
+ str(vlan_num),
+ network_ref)
+ else:
+ # Check VLAN tag is appropriate
+ network_rec = session.call_xenapi('network.get_record', network_ref)
+ # Retrieve PIFs from network
+ for pif_ref in network_rec['PIFs']:
+ # Retrieve VLAN from PIF
+ pif_rec = session.call_xenapi('PIF.get_record', pif_ref)
+ pif_vlan = int(pif_rec['VLAN'])
+ # Raise an exception if VLAN != vlan_num
+ if pif_vlan != vlan_num:
+ # use a plain local so the %(...)s lookup in locals() works
+ pif_uuid = pif_rec['uuid']
+ raise Exception(_("PIF %(pif_uuid)s for network "
+ "%(bridge)s has VLAN id %(pif_vlan)d. "
+ "Expected %(vlan_num)d") % locals())
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
deleted file mode 100644
index b213e18e8..000000000
--- a/nova/objectstore/bucket.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Simple object store using Blobs and JSON files on disk.
-"""
-
-import bisect
-import datetime
-import glob
-import json
-import os
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.objectstore import stored
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('buckets_path', '$state_path/buckets',
- 'path to s3 buckets')
-
-
-class Bucket(object):
- def __init__(self, name):
- self.name = name
- self.path = os.path.abspath(os.path.join(FLAGS.buckets_path, name))
- if not self.path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound()
-
- self.ctime = os.path.getctime(self.path)
-
- def __repr__(self):
- return "<Bucket: %s>" % self.name
-
- @staticmethod
- def all():
- """ list of all buckets """
- buckets = []
- for fn in glob.glob("%s/*.json" % FLAGS.buckets_path):
- try:
- json.load(open(fn))
- name = os.path.split(fn)[-1][:-5]
- buckets.append(Bucket(name))
- except:
- pass
-
- return buckets
-
- @staticmethod
- def create(bucket_name, context):
- """Create a new bucket owned by a project.
-
- @bucket_name: a string representing the name of the bucket to create
- @context: a nova.auth.api.ApiContext object representing who owns the
- bucket.
-
- Raises:
- NotAuthorized: if the bucket is already exists or has invalid name
- """
- path = os.path.abspath(os.path.join(
- FLAGS.buckets_path, bucket_name))
- if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- os.path.exists(path):
- raise exception.NotAuthorized()
-
- os.makedirs(path)
-
- with open(path + '.json', 'w') as f:
- json.dump({'ownerId': context.project_id}, f)
-
- @property
- def metadata(self):
- """ dictionary of metadata around bucket,
- keys are 'Name' and 'CreationDate'
- """
-
- return {
- "Name": self.name,
- "CreationDate": datetime.datetime.utcfromtimestamp(self.ctime),
- }
-
- @property
- def owner_id(self):
- try:
- with open(self.path + '.json') as f:
- return json.load(f)['ownerId']
- except:
- return None
-
- def is_authorized(self, context):
- try:
- return context.is_admin or \
- self.owner_id == context.project_id
- except Exception, e:
- return False
-
- def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
- object_names = []
- path_length = len(self.path)
- for root, dirs, files in os.walk(self.path):
- for file_name in files:
- object_name = os.path.join(root, file_name)[path_length + 1:]
- object_names.append(object_name)
- object_names.sort()
- contents = []
-
- start_pos = 0
- if marker:
- start_pos = bisect.bisect_right(object_names, marker, start_pos)
- if prefix:
- start_pos = bisect.bisect_left(object_names, prefix, start_pos)
-
- truncated = False
- for object_name in object_names[start_pos:]:
- if not object_name.startswith(prefix):
- break
- if len(contents) >= max_keys:
- truncated = True
- break
- object_path = self._object_path(object_name)
- c = {"Key": object_name}
- if not terse:
- info = os.stat(object_path)
- c.update({
- "LastModified": datetime.datetime.utcfromtimestamp(
- info.st_mtime),
- "Size": info.st_size,
- })
- contents.append(c)
- marker = object_name
-
- return {
- "Name": self.name,
- "Prefix": prefix,
- "Marker": marker,
- "MaxKeys": max_keys,
- "IsTruncated": truncated,
- "Contents": contents,
- }
-
- def _object_path(self, object_name):
- fn = os.path.join(self.path, object_name)
-
- if not fn.startswith(self.path):
- raise exception.NotAuthorized()
-
- return fn
-
- def delete(self):
- if len(os.listdir(self.path)) > 0:
- raise exception.NotEmpty()
- os.rmdir(self.path)
- os.remove(self.path + '.json')
-
- def __getitem__(self, key):
- return stored.Object(self, key)
-
- def __setitem__(self, key, value):
- with open(self._object_path(key), 'wb') as f:
- f.write(value)
-
- def __delitem__(self, key):
- stored.Object(self, key).delete()
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
deleted file mode 100644
index 05ddace4b..000000000
--- a/nova/objectstore/handler.py
+++ /dev/null
@@ -1,478 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2010 OpenStack LLC.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Implementation of an S3-like storage server based on local files.
-
-Useful to test features that will eventually run on S3, or if you want to
-run something locally that was once running on S3.
-
-We don't support all the features of S3, but it does work with the
-standard S3 client for the most basic semantics. To use the standard
-S3 client with this module::
-
- c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
- is_secure=False)
- c.create_bucket("mybucket")
- c.put("mybucket", "mykey", "a value")
- print c.get("mybucket", "mykey").body
-
-"""
-
-import datetime
-import json
-import multiprocessing
-import os
-import urllib
-
-from twisted.application import internet
-from twisted.application import service
-from twisted.web import error
-from twisted.web import resource
-from twisted.web import server
-from twisted.web import static
-
-from nova import context
-from nova import exception
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.auth import manager
-from nova.objectstore import bucket
-from nova.objectstore import image
-
-
-LOG = logging.getLogger('nova.objectstore.handler')
-FLAGS = flags.FLAGS
-flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
-
-
-def render_xml(request, value):
- """Writes value as XML string to request"""
- assert isinstance(value, dict) and len(value) == 1
- request.setHeader("Content-Type", "application/xml; charset=UTF-8")
-
- name = value.keys()[0]
- request.write('<?xml version="1.0" encoding="UTF-8"?>\n')
- request.write('<' + utils.utf8(name) +
- ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
- _render_parts(value.values()[0], request.write)
- request.write('</' + utils.utf8(name) + '>')
- request.finish()
-
-
-def finish(request, content=None):
- """Finalizer method for request"""
- if content:
- request.write(content)
- request.finish()
-
-
-def _render_parts(value, write_cb):
- """Helper method to render different Python objects to XML"""
- if isinstance(value, basestring):
- write_cb(utils.xhtml_escape(value))
- elif isinstance(value, int) or isinstance(value, long):
- write_cb(str(value))
- elif isinstance(value, datetime.datetime):
- write_cb(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
- elif isinstance(value, dict):
- for name, subvalue in value.iteritems():
- if not isinstance(subvalue, list):
- subvalue = [subvalue]
- for subsubvalue in subvalue:
- write_cb('<' + utils.utf8(name) + '>')
- _render_parts(subsubvalue, write_cb)
- write_cb('</' + utils.utf8(name) + '>')
- else:
- raise Exception(_("Unknown S3 value type %r"), value)
-
-
-def get_argument(request, key, default_value):
- """Returns the request's value at key, or default_value
- if not found
- """
- if key in request.args:
- return request.args[key][0]
- return default_value
-
-
-def get_context(request):
- """Returns the supplied request's context object"""
- try:
- # Authorization Header format: 'AWS <access>:<secret>'
- authorization_header = request.getHeader('Authorization')
- if not authorization_header:
- raise exception.NotAuthorized()
- auth_header_value = authorization_header.split(' ')[1]
- access, _ignored, secret = auth_header_value.rpartition(':')
- am = manager.AuthManager()
- (user, project) = am.authenticate(access,
- secret,
- {},
- request.method,
- request.getRequestHostname(),
- request.uri,
- headers=request.getAllHeaders(),
- check_type='s3')
- rv = context.RequestContext(user, project)
- LOG.audit(_("Authenticated request"), context=rv)
- return rv
- except exception.Error as ex:
- LOG.debug(_("Authentication Failure: %s"), ex)
- raise exception.NotAuthorized()
-
-
-class ErrorHandlingResource(resource.Resource):
- """Maps exceptions to 404 / 401 codes. Won't work for
- exceptions thrown after NOT_DONE_YET is returned.
- """
- # TODO(unassigned) (calling-all-twisted-experts): This needs to be
- # plugged in to the right place in twisted...
- # This doesn't look like it's the right place
- # (consider exceptions in getChild; or after
- # NOT_DONE_YET is returned
- def render(self, request):
- """Renders the response as XML"""
- try:
- return resource.Resource.render(self, request)
- except exception.NotFound:
- request.setResponseCode(404)
- return ''
- except exception.NotAuthorized:
- request.setResponseCode(403)
- return ''
-
-
-class S3(ErrorHandlingResource):
- """Implementation of an S3-like storage server based on local files."""
- def __init__(self):
- ErrorHandlingResource.__init__(self)
-
- def getChild(self, name, request): # pylint: disable-msg=C0103
- """Returns either the image or bucket resource"""
- request.context = get_context(request)
- if name == '':
- return self
- elif name == '_images':
- return ImagesResource()
- else:
- return BucketResource(name)
-
- def render_GET(self, request): # pylint: disable-msg=R0201
- """Renders the GET request for a list of buckets as XML"""
- LOG.debug(_('List of buckets requested'), context=request.context)
- buckets = [b for b in bucket.Bucket.all()
- if b.is_authorized(request.context)]
-
- render_xml(request, {"ListAllMyBucketsResult": {
- "Buckets": {"Bucket": [b.metadata for b in buckets]},
- }})
- return server.NOT_DONE_YET
-
-
-class BucketResource(ErrorHandlingResource):
- """A web resource containing an S3-like bucket"""
- def __init__(self, name):
- resource.Resource.__init__(self)
- self.name = name
-
- def getChild(self, name, request):
- """Returns the bucket resource itself, or the object resource
- the bucket contains if a name is supplied
- """
- if name == '':
- return self
- else:
- return ObjectResource(bucket.Bucket(self.name), name)
-
- def render_GET(self, request):
- "Returns the keys for the bucket resource"""
- LOG.debug(_("List keys for bucket %s"), self.name)
-
- try:
- bucket_object = bucket.Bucket(self.name)
- except exception.NotFound:
- return error.NoResource(message="No such bucket").render(request)
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to access bucket %s"),
- self.name, context=request.context)
- raise exception.NotAuthorized()
-
- prefix = get_argument(request, "prefix", u"")
- marker = get_argument(request, "marker", u"")
- max_keys = int(get_argument(request, "max-keys", 1000))
- terse = int(get_argument(request, "terse", 0))
-
- results = bucket_object.list_keys(prefix=prefix,
- marker=marker,
- max_keys=max_keys,
- terse=terse)
- render_xml(request, {"ListBucketResult": results})
- return server.NOT_DONE_YET
-
- def render_PUT(self, request):
- "Creates the bucket resource"""
- LOG.debug(_("Creating bucket %s"), self.name)
- LOG.debug("calling bucket.Bucket.create(%r, %r)",
- self.name,
- request.context)
- bucket.Bucket.create(self.name, request.context)
- request.finish()
- return server.NOT_DONE_YET
-
- def render_DELETE(self, request):
- """Deletes the bucket resource"""
- LOG.debug(_("Deleting bucket %s"), self.name)
- bucket_object = bucket.Bucket(self.name)
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete bucket %s"),
- self.name, context=request.context)
- raise exception.NotAuthorized()
-
- bucket_object.delete()
- request.setResponseCode(204)
- return ''
-
-
-class ObjectResource(ErrorHandlingResource):
- """The resource returned from a bucket"""
- def __init__(self, bucket, name):
- resource.Resource.__init__(self)
- self.bucket = bucket
- self.name = name
-
- def render_GET(self, request):
- """Returns the object
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- bname = self.bucket.name
- nm = self.name
- LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to get object %(nm)s"
- " from bucket %(bname)s") % locals(),
- context=request.context)
- raise exception.NotAuthorized()
-
- obj = self.bucket[urllib.unquote(self.name)]
- request.setHeader("Content-Type", "application/unknown")
- request.setHeader("Last-Modified",
- datetime.datetime.utcfromtimestamp(obj.mtime))
- request.setHeader("Etag", '"' + obj.md5 + '"')
- return static.File(obj.path).render_GET(request)
-
- def render_PUT(self, request):
- """Modifies/inserts the object and returns a result code
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- nm = self.name
- bname = self.bucket.name
- LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
- " bucket %(bname)s") % locals(), context=request.context)
- raise exception.NotAuthorized()
-
- key = urllib.unquote(self.name)
- request.content.seek(0, 0)
- self.bucket[key] = request.content.read()
- request.setHeader("Etag", '"' + self.bucket[key].md5 + '"')
- finish(request)
- return server.NOT_DONE_YET
-
- def render_DELETE(self, request):
- """Deletes the object and returns a result code
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- nm = self.name
- bname = self.bucket.name
- LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(),
- context=request.context)
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete object %(nm)s from "
- "bucket %(bname)s") % locals(), context=request.context)
- raise exception.NotAuthorized()
-
- del self.bucket[urllib.unquote(self.name)]
- request.setResponseCode(204)
- return ''
-
-
-class ImageResource(ErrorHandlingResource):
- """A web resource representing a single image"""
- isLeaf = True
-
- def __init__(self, name):
- resource.Resource.__init__(self)
- self.img = image.Image(name)
-
- def render_GET(self, request):
- """Returns the image file"""
- if not self.img.is_authorized(request.context, True):
- raise exception.NotAuthorized()
- return static.File(self.img.image_path,
- defaultType='application/octet-stream').\
- render_GET(request)
-
-
-class ImagesResource(resource.Resource):
- """A web resource representing a list of images"""
-
- def getChild(self, name, _request):
- """Returns itself or an ImageResource if no name given"""
- if name == '':
- return self
- else:
- return ImageResource(name)
-
- def render_GET(self, request): # pylint: disable-msg=R0201
- """ returns a json listing of all images
- that a user has permissions to see """
-
- images = [i for i in image.Image.all() \
- if i.is_authorized(request.context, readonly=True)]
-
- # Bug #617776:
- # We used to have 'type' in the image metadata, but this field
- # should be called 'imageType', as per the EC2 specification.
- # For compat with old metadata files we copy type to imageType if
- # imageType is not present.
- # For compat with euca2ools (and any other clients using the
- # incorrect name) we copy imageType to type.
- # imageType is primary if we end up with both in the metadata file
- # (which should never happen).
- def decorate(m):
- if 'imageType' not in m and 'type' in m:
- m[u'imageType'] = m['type']
- elif 'imageType' in m:
- m[u'type'] = m['imageType']
- if 'displayName' not in m:
- m[u'displayName'] = u''
- return m
-
- request.write(json.dumps([decorate(i.metadata) for i in images]))
- request.finish()
- return server.NOT_DONE_YET
-
- def render_PUT(self, request): # pylint: disable-msg=R0201
- """ create a new registered image """
-
- image_id = get_argument(request, 'image_id', u'')
- image_location = get_argument(request, 'image_location', u'')
-
- image_path = os.path.join(FLAGS.images_path, image_id)
- if ((not image_path.startswith(FLAGS.images_path)) or
- os.path.exists(image_path)):
- LOG.audit(_("Not authorized to upload image: invalid directory "
- "%s"),
- image_path, context=request.context)
- raise exception.NotAuthorized()
-
- bucket_object = bucket.Bucket(image_location.split("/")[0])
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Not authorized to upload image: unauthorized "
- "bucket %s"), bucket_object.name,
- context=request.context)
- raise exception.NotAuthorized()
-
- LOG.audit(_("Starting image upload: %s"), image_id,
- context=request.context)
- p = multiprocessing.Process(target=image.Image.register_aws_image,
- args=(image_id, image_location, request.context))
- p.start()
- return ''
-
- def render_POST(self, request): # pylint: disable-msg=R0201
- """Update image attributes: public/private"""
-
- # image_id required for all requests
- image_id = get_argument(request, 'image_id', u'')
- image_object = image.Image(image_id)
- if not image_object.is_authorized(request.context):
- LOG.audit(_("Not authorized to update attributes of image %s"),
- image_id, context=request.context)
- raise exception.NotAuthorized()
-
- operation = get_argument(request, 'operation', u'')
- if operation:
- # operation implies publicity toggle
- newstatus = (operation == 'add')
- LOG.audit(_("Toggling publicity flag of image %(image_id)s"
- " %(newstatus)r") % locals(), context=request.context)
- image_object.set_public(newstatus)
- else:
- # other attributes imply update
- LOG.audit(_("Updating user fields on image %s"), image_id,
- context=request.context)
- clean_args = {}
- for arg in request.args.keys():
- clean_args[arg] = request.args[arg][0]
- image_object.update_user_editable_fields(clean_args)
- return ''
-
- def render_DELETE(self, request): # pylint: disable-msg=R0201
- """Delete a registered image"""
- image_id = get_argument(request, "image_id", u"")
- image_object = image.Image(image_id)
-
- if not image_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete image %s"),
- image_id, context=request.context)
- raise exception.NotAuthorized()
-
- image_object.delete()
- LOG.audit(_("Deleted image: %s"), image_id, context=request.context)
-
- request.setResponseCode(204)
- return ''
-
-
-def get_site():
- """Support for WSGI-like interfaces"""
- root = S3()
- site = server.Site(root)
- return site
-
-
-def get_application():
- """Support WSGI-like interfaces"""
- factory = get_site()
- application = service.Application("objectstore")
- # Disabled because of lack of proper introspection in Twisted
- # or possibly different versions of twisted?
- # pylint: disable-msg=E1101
- objectStoreService = internet.TCPServer(FLAGS.s3_port, factory,
- interface=FLAGS.s3_listen_host)
- objectStoreService.setServiceParent(application)
- return application
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
deleted file mode 100644
index 27227e2ca..000000000
--- a/nova/objectstore/image.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Take uploaded bucket contents and register them as disk images (AMIs).
-Requires decryption using keys in the manifest.
-"""
-
-
-import binascii
-import glob
-import json
-import os
-import shutil
-import tarfile
-from xml.etree import ElementTree
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.objectstore import bucket
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('images_path', '$state_path/images',
- 'path to decrypted images')
-
-
-class Image(object):
- def __init__(self, image_id):
- self.image_id = image_id
- self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
- if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound
-
- @property
- def image_path(self):
- return os.path.join(self.path, 'image')
-
- def delete(self):
- for fn in ['info.json', 'image']:
- try:
- os.unlink(os.path.join(self.path, fn))
- except:
- pass
- try:
- os.rmdir(self.path)
- except:
- pass
-
- def is_authorized(self, context, readonly=False):
- # NOTE(devcamcar): Public images can be read by anyone,
- # but only modified by admin or owner.
- try:
- return (self.metadata['isPublic'] and readonly) or \
- context.is_admin or \
- self.metadata['imageOwnerId'] == context.project_id
- except:
- return False
-
- def set_public(self, state):
- md = self.metadata
- md['isPublic'] = state
- with open(os.path.join(self.path, 'info.json'), 'w') as f:
- json.dump(md, f)
-
- def update_user_editable_fields(self, args):
- """args is from the request parameters, so requires extra cleaning"""
- fields = {'display_name': 'displayName', 'description': 'description'}
- info = self.metadata
- for field in fields.keys():
- if field in args:
- info[fields[field]] = args[field]
- with open(os.path.join(self.path, 'info.json'), 'w') as f:
- json.dump(info, f)
-
- @staticmethod
- def all():
- images = []
- for fn in glob.glob("%s/*/info.json" % FLAGS.images_path):
- try:
- image_id = fn.split('/')[-2]
- images.append(Image(image_id))
- except:
- pass
- return images
-
- @property
- def owner_id(self):
- return self.metadata['imageOwnerId']
-
- @property
- def metadata(self):
- with open(os.path.join(self.path, 'info.json')) as f:
- return json.load(f)
-
- @staticmethod
- def add(src, description, kernel=None, ramdisk=None, public=True):
- """adds an image to imagestore
-
- @type src: str
- @param src: location of the partition image on disk
-
- @type description: str
- @param description: string describing the image contents
-
- @type kernel: bool or str
- @param kernel: either TRUE meaning this partition is a kernel image or
- a string of the image id for the kernel
-
- @type ramdisk: bool or str
- @param ramdisk: either TRUE meaning this partition is a ramdisk image
- or a string of the image id for the ramdisk
-
-
- @type public: bool
- @param public: determine if this is a public image or private
-
- @rtype: str
- @return: a string with the image id
- """
-
- image_type = 'machine'
- image_id = utils.generate_uid('ami')
-
- if kernel is True:
- image_type = 'kernel'
- image_id = utils.generate_uid('aki')
- if ramdisk is True:
- image_type = 'ramdisk'
- image_id = utils.generate_uid('ari')
-
- image_path = os.path.join(FLAGS.images_path, image_id)
- os.makedirs(image_path)
-
- shutil.copyfile(src, os.path.join(image_path, 'image'))
-
- info = {
- 'imageId': image_id,
- 'imageLocation': description,
- 'imageOwnerId': 'system',
- 'isPublic': public,
- 'architecture': 'x86_64',
- 'imageType': image_type,
- 'state': 'available'}
-
- if type(kernel) is str and len(kernel) > 0:
- info['kernelId'] = kernel
-
- if type(ramdisk) is str and len(ramdisk) > 0:
- info['ramdiskId'] = ramdisk
-
- with open(os.path.join(image_path, 'info.json'), "w") as f:
- json.dump(info, f)
-
- return image_id
-
- @staticmethod
- def register_aws_image(image_id, image_location, context):
- image_path = os.path.join(FLAGS.images_path, image_id)
- os.makedirs(image_path)
-
- bucket_name = image_location.split("/")[0]
- manifest_path = image_location[len(bucket_name) + 1:]
- bucket_object = bucket.Bucket(bucket_name)
-
- manifest = ElementTree.fromstring(bucket_object[manifest_path].read())
- image_type = 'machine'
-
- try:
- kernel_id = manifest.find("machine_configuration/kernel_id").text
- if kernel_id == 'true':
- image_type = 'kernel'
- except:
- kernel_id = None
-
- try:
- ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
- if ramdisk_id == 'true':
- image_type = 'ramdisk'
- except:
- ramdisk_id = None
-
- try:
- arch = manifest.find("machine_configuration/architecture").text
- except:
- arch = 'x86_64'
-
- info = {
- 'imageId': image_id,
- 'imageLocation': image_location,
- 'imageOwnerId': context.project_id,
- 'isPublic': False, # FIXME: grab public from manifest
- 'architecture': arch,
- 'imageType': image_type}
-
- if kernel_id:
- info['kernelId'] = kernel_id
-
- if ramdisk_id:
- info['ramdiskId'] = ramdisk_id
-
- def write_state(state):
- info['imageState'] = state
- with open(os.path.join(image_path, 'info.json'), "w") as f:
- json.dump(info, f)
-
- write_state('pending')
-
- encrypted_filename = os.path.join(image_path, 'image.encrypted')
- with open(encrypted_filename, 'w') as f:
- for filename in manifest.find("image").getiterator("filename"):
- shutil.copyfileobj(bucket_object[filename.text].file, f)
-
- write_state('decrypting')
-
- # FIXME: grab kernelId and ramdiskId from bundle manifest
- hex_key = manifest.find("image/ec2_encrypted_key").text
- encrypted_key = binascii.a2b_hex(hex_key)
- hex_iv = manifest.find("image/ec2_encrypted_iv").text
- encrypted_iv = binascii.a2b_hex(hex_iv)
- cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem")
-
- decrypted_filename = os.path.join(image_path, 'image.tar.gz')
- Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
- cloud_private_key, decrypted_filename)
-
- write_state('untarring')
-
- image_file = Image.untarzip_image(image_path, decrypted_filename)
- shutil.move(os.path.join(image_path, image_file),
- os.path.join(image_path, 'image'))
-
- write_state('available')
- os.unlink(decrypted_filename)
- os.unlink(encrypted_filename)
-
- @staticmethod
- def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
- cloud_private_key, decrypted_filename):
- key, err = utils.execute(
- 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
- process_input=encrypted_key,
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt private key: %s")
- % err)
- iv, err = utils.execute(
- 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
- process_input=encrypted_iv,
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt initialization "
- "vector: %s") % err)
-
- _out, err = utils.execute(
- 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
- % (encrypted_filename, key, iv, decrypted_filename),
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt image file "
- "%(image_file)s: %(err)s") %
- {'image_file': encrypted_filename,
- 'err': err})
-
- @staticmethod
- def untarzip_image(path, filename):
- tar_file = tarfile.open(filename, "r|gz")
- tar_file.extractall(path)
- image_file = tar_file.getnames()[0]
- tar_file.close()
- return image_file
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
new file mode 100644
index 000000000..dd6327c8f
--- /dev/null
+++ b/nova/objectstore/s3server.py
@@ -0,0 +1,335 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of an S3-like storage server based on local files.
+
+Useful to test features that will eventually run on S3, or if you want to
+run something locally that was once running on S3.
+
+This server doesn't support all of S3's features, but it works with the
+standard S3 client for the most basic semantics. To use the standard
+S3 client with this module:
+
+ c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
+ is_secure=False)
+ c.create_bucket("mybucket")
+ c.put("mybucket", "mykey", "a value")
+ print c.get("mybucket", "mykey").body
+
+"""
+
+import bisect
+import datetime
+import hashlib
+import os
+import os.path
+import urllib
+
+import routes
+import webob
+
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova import wsgi
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('buckets_path', '$state_path/buckets',
+ 'path to s3 buckets')
+
+
+class S3Application(wsgi.Router):
+ """Implementation of an S3-like storage server based on local files.
+
+ If bucket depth is given, we break files up into multiple directories
+ to prevent hitting file system limits for the number of files in each
+ directory. 1 means one level of directories, 2 means 2, etc.
+
+ """
+
+ def __init__(self, root_directory, bucket_depth=0, mapper=None):
+ if mapper is None:
+ mapper = routes.Mapper()
+
+ mapper.connect('/',
+ controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
+ mapper.connect('/{bucket}/{object_name}',
+ controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw))
+ mapper.connect('/{bucket_name}/',
+ controller=lambda *a, **kw: BucketHandler(self)(*a, **kw))
+ self.directory = os.path.abspath(root_directory)
+ if not os.path.exists(self.directory):
+ os.makedirs(self.directory)
+ self.bucket_depth = bucket_depth
+ super(S3Application, self).__init__(mapper)
+
+
+class BaseRequestHandler(wsgi.Controller):
+ """Base class emulating Tornado's web framework pattern in WSGI.
+
+ This is a direct port of Tornado's implementation, so some key decisions
+ about how the code interacts have already been chosen.
+
+ The two most common ways of designing web frameworks can be
+ classified as async object-oriented and sync functional.
+
+ Tornado's is on the OO side because a response is built up in and using
+ the shared state of an object and one of the object's methods will
+ eventually trigger the "finishing" of the response asynchronously.
+
+ Most WSGI code is on the functional side: we pass a request object down
+ a chain of calls, and the eventual return value is the response.
+
+ Part of the function of the routing code in S3Application as well as the
+ code in BaseRequestHandler's __call__ method is to merge those two styles
+ together enough that the Tornado code can work without extensive
+ modifications.
+
+ To do that, it needs to give the Tornado-style code a clean object whose
+ state it can modify for each request that is processed. We use a very
+ simple factory lambda in the router to create that new state per request,
+ let the Tornado code modify the object while handling the request, and
+ then return the response it generated.
+ This wouldn't work the same if Tornado was being more async'y and doing
+ other callbacks throughout the process, but since Tornado is being
+ relatively simple here we can be satisfied that the response will be
+ complete by the end of the get/post method.
+
+ """
+
+ def __init__(self, application):
+ self.application = application
+
+ @webob.dec.wsgify
+ def __call__(self, request):
+ method = request.method.lower()
+ f = getattr(self, method, self.invalid)
+ self.request = request
+ self.response = webob.Response()
+ params = request.environ['wsgiorg.routing_args'][1]
+ del params['controller']
+ f(**params)
+ return self.response
+
+ def get_argument(self, arg, default):
+ return self.request.str_params.get(arg, default)
+
+ def set_header(self, header, value):
+ self.response.headers[header] = value
+
+ def set_status(self, status_code):
+ self.response.status = status_code
+
+ def finish(self, body=''):
+ self.response.body = utils.utf8(body)
+
+ def invalid(self, **kwargs):
+ pass
+
+ def render_xml(self, value):
+ assert isinstance(value, dict) and len(value) == 1
+ self.set_header("Content-Type", "application/xml; charset=UTF-8")
+ name = value.keys()[0]
+ parts = []
+ parts.append('<' + utils.utf8(name) +
+ ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
+ self._render_parts(value.values()[0], parts)
+ parts.append('</' + utils.utf8(name) + '>')
+ self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
+ ''.join(parts))
+
+ def _render_parts(self, value, parts=[]):
+ if isinstance(value, basestring):
+ parts.append(utils.xhtml_escape(value))
+ elif isinstance(value, int) or isinstance(value, long):
+ parts.append(str(value))
+ elif isinstance(value, datetime.datetime):
+ parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
+ elif isinstance(value, dict):
+ for name, subvalue in value.iteritems():
+ if not isinstance(subvalue, list):
+ subvalue = [subvalue]
+ for subsubvalue in subvalue:
+ parts.append('<' + utils.utf8(name) + '>')
+ self._render_parts(subsubvalue, parts)
+ parts.append('</' + utils.utf8(name) + '>')
+ else:
+ raise Exception("Unknown S3 value type %r" % value)
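As a worked example of render_xml() and _render_parts() above (the value is
hypothetical), render_xml({'Error': {'Code': 'NoSuchKey'}}) sets the XML
content type and produces this body:

    <?xml version="1.0" encoding="UTF-8"?>
    <Error xmlns="http://doc.s3.amazonaws.com/2006-03-01"><Code>NoSuchKey</Code></Error>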
+
+ def _object_path(self, bucket, object_name):
+ if self.application.bucket_depth < 1:
+ return os.path.abspath(os.path.join(
+ self.application.directory, bucket, object_name))
+ hash = hashlib.md5(object_name).hexdigest()
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket))
+ for i in range(self.application.bucket_depth):
+ path = os.path.join(path, hash[:2 * (i + 1)])
+ return os.path.join(path, object_name)
+
+
+class RootHandler(BaseRequestHandler):
+ def get(self):
+ names = os.listdir(self.application.directory)
+ buckets = []
+ for name in names:
+ path = os.path.join(self.application.directory, name)
+ info = os.stat(path)
+ buckets.append({
+ "Name": name,
+ "CreationDate": datetime.datetime.utcfromtimestamp(
+ info.st_ctime),
+ })
+ self.render_xml({"ListAllMyBucketsResult": {
+ "Buckets": {"Bucket": buckets},
+ }})
+
+
+class BucketHandler(BaseRequestHandler):
+ def get(self, bucket_name):
+ prefix = self.get_argument("prefix", u"")
+ marker = self.get_argument("marker", u"")
+ max_keys = int(self.get_argument("max-keys", 50000))
+ path = os.path.abspath(os.path.join(self.application.directory,
+ bucket_name))
+ terse = int(self.get_argument("terse", 0))
+ if not path.startswith(self.application.directory) or \
+ not os.path.isdir(path):
+ self.set_status(404)
+ return
+ object_names = []
+ for root, dirs, files in os.walk(path):
+ for file_name in files:
+ object_names.append(os.path.join(root, file_name))
+ skip = len(path) + 1
+ for i in range(self.application.bucket_depth):
+ skip += 2 * (i + 1) + 1
+ object_names = [n[skip:] for n in object_names]
+ object_names.sort()
+ contents = []
+
+ start_pos = 0
+ if marker:
+ start_pos = bisect.bisect_right(object_names, marker, start_pos)
+ if prefix:
+ start_pos = bisect.bisect_left(object_names, prefix, start_pos)
+
+ truncated = False
+ for object_name in object_names[start_pos:]:
+ if not object_name.startswith(prefix):
+ break
+ if len(contents) >= max_keys:
+ truncated = True
+ break
+ object_path = self._object_path(bucket_name, object_name)
+ c = {"Key": object_name}
+ if not terse:
+ info = os.stat(object_path)
+ c.update({
+ "LastModified": datetime.datetime.utcfromtimestamp(
+ info.st_mtime),
+ "Size": info.st_size,
+ })
+ contents.append(c)
+ marker = object_name
+ self.render_xml({"ListBucketResult": {
+ "Name": bucket_name,
+ "Prefix": prefix,
+ "Marker": marker,
+ "MaxKeys": max_keys,
+ "IsTruncated": truncated,
+ "Contents": contents,
+ }})
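A minimal sketch of exercising the listing handler above directly through
WSGI (the bucket path, prefix and marker are made up; it assumes
nova.wsgi.Router instances are callable as WSGI applications):

    import webob
    req = webob.Request.blank(
        '/mybucket/?prefix=images/&marker=images/0004&max-keys=2')
    print req.get_response(S3Application('/tmp/buckets'))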
+
+ def put(self, bucket_name):
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket_name))
+ if not path.startswith(self.application.directory) or \
+ os.path.exists(path):
+ self.set_status(403)
+ return
+ os.makedirs(path)
+ self.finish()
+
+ def delete(self, bucket_name):
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket_name))
+ if not path.startswith(self.application.directory) or \
+ not os.path.isdir(path):
+ self.set_status(404)
+ return
+ if len(os.listdir(path)) > 0:
+ self.set_status(403)
+ return
+ os.rmdir(path)
+ self.set_status(204)
+ self.finish()
+
+
+class ObjectHandler(BaseRequestHandler):
+ def get(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(self.application.directory) or \
+ not os.path.isfile(path):
+ self.set_status(404)
+ return
+ info = os.stat(path)
+ self.set_header("Content-Type", "application/unknown")
+ self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
+ info.st_mtime))
+ object_file = open(path, "rb")
+ try:
+ self.finish(object_file.read())
+ finally:
+ object_file.close()
+
+ def put(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ bucket_dir = os.path.abspath(os.path.join(
+ self.application.directory, bucket))
+ if not bucket_dir.startswith(self.application.directory) or \
+ not os.path.isdir(bucket_dir):
+ self.set_status(404)
+ return
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(bucket_dir) or os.path.isdir(path):
+ self.set_status(403)
+ return
+ directory = os.path.dirname(path)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ object_file = open(path, "wb")
+ object_file.write(self.request.body)
+ object_file.close()
+ self.set_header('ETag',
+ '"%s"' % hashlib.md5(self.request.body).hexdigest())
+ self.finish()
+
+ def delete(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(self.application.directory) or \
+ not os.path.isfile(path):
+ self.set_status(404)
+ return
+ os.unlink(path)
+ self.set_status(204)
+ self.finish()
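A standalone sketch of the bucket_depth sharding used by
BaseRequestHandler._object_path above (names and depth are illustrative):

    import hashlib
    import os.path

    def sharded_path(root, bucket, object_name, bucket_depth=2):
        # Mirrors _object_path: one hash-prefix directory per depth level.
        digest = hashlib.md5(object_name).hexdigest()
        path = os.path.join(root, bucket)
        for i in range(bucket_depth):
            path = os.path.join(path, digest[:2 * (i + 1)])
        return os.path.join(path, object_name)

    # e.g. <root>/mybucket/<h2>/<h4>/mykey, where <h2> and <h4> are the
    # first 2 and 4 hex digits of md5('mykey').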
diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py
deleted file mode 100644
index a3f6e9c0b..000000000
--- a/nova/objectstore/stored.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Properties of an object stored within a bucket.
-"""
-
-import os
-
-import nova.crypto
-from nova import exception
-
-
-class Object(object):
- def __init__(self, bucket, key):
- """ wrapper class of an existing key """
- self.bucket = bucket
- self.key = key
- self.path = bucket._object_path(key)
- if not os.path.isfile(self.path):
- raise exception.NotFound
-
- def __repr__(self):
- return "<Object %s/%s>" % (self.bucket, self.key)
-
- @property
- def md5(self):
- """ computes the MD5 of the contents of file """
- with open(self.path, "r") as f:
- return nova.crypto.compute_md5(f)
-
- @property
- def mtime(self):
- """ mtime of file """
- return os.path.getmtime(self.path)
-
- def read(self):
- """ read all contents of key into memory and return """
- return self.file.read()
-
- @property
- def file(self):
- """ return a file object for the key """
- return open(self.path, 'rb')
-
- def delete(self):
- """ deletes the file """
- os.unlink(self.path)
diff --git a/nova/quota.py b/nova/quota.py
index 6b52a97fa..2b24c0b5b 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -37,6 +37,12 @@ flags.DEFINE_integer('quota_floating_ips', 10,
'number of floating ips allowed per project')
flags.DEFINE_integer('quota_metadata_items', 128,
'number of metadata items allowed per instance')
+flags.DEFINE_integer('quota_max_injected_files', 5,
+ 'number of injected files allowed')
+flags.DEFINE_integer('quota_max_injected_file_content_bytes', 10 * 1024,
+ 'number of bytes allowed per injected file')
+flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255,
+ 'number of bytes allowed per injected file path')
def get_quota(context, project_id):
@@ -46,6 +52,7 @@ def get_quota(context, project_id):
'gigabytes': FLAGS.quota_gigabytes,
'floating_ips': FLAGS.quota_floating_ips,
'metadata_items': FLAGS.quota_metadata_items}
+
try:
quota = db.quota_get(context, project_id)
for key in rval.keys():
@@ -106,6 +113,21 @@ def allowed_metadata_items(context, num_metadata_items):
return min(num_metadata_items, num_allowed_metadata_items)
+def allowed_injected_files(context):
+ """Return the number of injected files allowed"""
+ return FLAGS.quota_max_injected_files
+
+
+def allowed_injected_file_content_bytes(context):
+ """Return the number of bytes allowed per injected file content"""
+ return FLAGS.quota_max_injected_file_content_bytes
+
+
+def allowed_injected_file_path_bytes(context):
+ """Return the number of bytes allowed in an injected file path"""
+ return FLAGS.quota_max_injected_file_path_bytes
+
+
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
pass
diff --git a/nova/rpc.py b/nova/rpc.py
index 205bb524a..b610cdf9b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -62,7 +62,7 @@ class Connection(carrot_connection.BrokerConnection):
params['backend_cls'] = fakerabbit.Backend
# NOTE(vish): magic is fun!
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
if new:
return cls(**params)
else:
@@ -74,7 +74,12 @@ class Connection(carrot_connection.BrokerConnection):
"""Recreates the connection instance
This is necessary to recover from some network errors/disconnects"""
- del cls._instance
+ try:
+ del cls._instance
+ except AttributeError, e:
+ # The _instance stuff is for testing purposes. Usually we don't use
+ # it. So don't freak out if it doesn't exist.
+ pass
return cls.instance()
@@ -91,18 +96,19 @@ class Consumer(messaging.Consumer):
super(Consumer, self).__init__(*args, **kwargs)
self.failed_connection = False
break
- except: # Catching all because carrot sucks
+ except Exception as e: # Catching all because carrot sucks
fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port
fl_intv = FLAGS.rabbit_retry_interval
- LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
- " unreachable. Trying again in %(fl_intv)d seconds.")
+ LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is"
+ " unreachable: %(e)s. Trying again in %(fl_intv)d"
+ " seconds.")
% locals())
self.failed_connection = True
if self.failed_connection:
- LOG.exception(_("Unable to connect to AMQP server "
- "after %d tries. Shutting down."),
- FLAGS.rabbit_max_retries)
+ LOG.error(_("Unable to connect to AMQP server "
+ "after %d tries. Shutting down."),
+ FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -113,7 +119,7 @@ class Consumer(messaging.Consumer):
if self.failed_connection:
# NOTE(vish): connection is defined in the parent class, we can
# recreate it as long as we create the backend too
- # pylint: disable-msg=W0201
+ # pylint: disable=W0201
self.connection = Connection.recreate()
self.backend = self.connection.create_backend()
self.declare()
@@ -122,11 +128,11 @@ class Consumer(messaging.Consumer):
LOG.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
- # exceptions to be logged 10 times a second if some
+ # want exceptions to be logged 10 times a second if some
# persistent failure occurs.
- except Exception: # pylint: disable-msg=W0703
+ except Exception, e: # pylint: disable=W0703
if not self.failed_connection:
- LOG.exception(_("Failed to fetch message from queue"))
+ LOG.exception(_("Failed to fetch message from queue: %s") % e)
self.failed_connection = True
def attach_to_eventlet(self):
@@ -136,24 +142,7 @@ class Consumer(messaging.Consumer):
return timer
-class Publisher(messaging.Publisher):
- """Publisher base class"""
- pass
-
-
-class TopicConsumer(Consumer):
- """Consumes messages on a specific topic"""
- exchange_type = "topic"
-
- def __init__(self, connection=None, topic="broadcast"):
- self.queue = topic
- self.routing_key = topic
- self.exchange = FLAGS.control_exchange
- self.durable = False
- super(TopicConsumer, self).__init__(connection=connection)
-
-
-class AdapterConsumer(TopicConsumer):
+class AdapterConsumer(Consumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
@@ -206,6 +195,41 @@ class AdapterConsumer(TopicConsumer):
return
+class Publisher(messaging.Publisher):
+ """Publisher base class"""
+ pass
+
+
+class TopicAdapterConsumer(AdapterConsumer):
+ """Consumes messages on a specific topic"""
+ exchange_type = "topic"
+
+ def __init__(self, connection=None, topic="broadcast", proxy=None):
+ self.queue = topic
+ self.routing_key = topic
+ self.exchange = FLAGS.control_exchange
+ self.durable = False
+ super(TopicAdapterConsumer, self).__init__(connection=connection,
+ topic=topic, proxy=proxy)
+
+
+class FanoutAdapterConsumer(AdapterConsumer):
+ """Consumes messages from a fanout exchange"""
+ exchange_type = "fanout"
+
+ def __init__(self, connection=None, topic="broadcast", proxy=None):
+ self.exchange = "%s_fanout" % topic
+ self.routing_key = topic
+ unique = uuid.uuid4().hex
+ self.queue = "%s_fanout_%s" % (topic, unique)
+ self.durable = False
+ LOG.info(_("Created '%(exchange)s' fanout exchange "
+ "with '%(key)s' routing key"),
+ dict(exchange=self.exchange, key=self.routing_key))
+ super(FanoutAdapterConsumer, self).__init__(connection=connection,
+ topic=topic, proxy=proxy)
+
+
class TopicPublisher(Publisher):
"""Publishes messages on a specific topic"""
exchange_type = "topic"
@@ -217,6 +241,19 @@ class TopicPublisher(Publisher):
super(TopicPublisher, self).__init__(connection=connection)
+class FanoutPublisher(Publisher):
+ """Publishes messages to a fanout exchange."""
+ exchange_type = "fanout"
+
+ def __init__(self, topic, connection=None):
+ self.exchange = "%s_fanout" % topic
+ self.queue = "%s_fanout" % topic
+ self.durable = False
+ LOG.info(_("Creating '%(exchange)s' fanout exchange"),
+ dict(exchange=self.exchange))
+ super(FanoutPublisher, self).__init__(connection=connection)
+
+
class DirectConsumer(Consumer):
"""Consumes messages directly on a channel specified by msg_id"""
exchange_type = "direct"
@@ -310,7 +347,7 @@ def _pack_context(msg, context):
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
- LOG.debug(_("Making asynchronous call..."))
+ LOG.debug(_("Making asynchronous call on %s ..."), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_("MSG_ID is %s") % (msg_id))
@@ -351,7 +388,7 @@ def call(context, topic, msg):
def cast(context, topic, msg):
"""Sends a message on a topic without waiting for a response"""
- LOG.debug(_("Making asynchronous cast..."))
+ LOG.debug(_("Making asynchronous cast on %s..."), topic)
_pack_context(msg, context)
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
@@ -359,6 +396,16 @@ def cast(context, topic, msg):
publisher.close()
+def fanout_cast(context, topic, msg):
+ """Sends a message on a fanout exchange without waiting for a response"""
+ LOG.debug(_("Making asynchronous fanout cast..."))
+ _pack_context(msg, context)
+ conn = Connection.instance()
+ publisher = FanoutPublisher(topic, connection=conn)
+ publisher.send(msg)
+ publisher.close()
+
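Illustrative use of fanout_cast (argument values are made up; the message
shape mirrors update_service_capabilities() in nova/scheduler/api.py later
in this patch). Every worker listening on the fanout exchange receives the
cast, not just one:

    rpc.fanout_cast(context, 'scheduler',
                    {'method': 'update_service_capabilities',
                     'args': {'service_name': 'compute',
                              'host': 'host1',
                              'capabilities': {'free_ram_mb': 2048}}})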
+
def generic_response(message_data, message):
"""Logs a result and exits"""
LOG.debug(_('response %s'), message_data)
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
new file mode 100644
index 000000000..6bb3bf3cd
--- /dev/null
+++ b/nova/scheduler/api.py
@@ -0,0 +1,241 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to schedulers.
+"""
+
+import novaclient
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+
+from eventlet import greenpool
+
+FLAGS = flags.FLAGS
+flags.DEFINE_bool('enable_zone_routing',
+ False,
+ 'When True, routing to child zones will occur.')
+
+LOG = logging.getLogger('nova.scheduler.api')
+
+
+def _call_scheduler(method, context, params=None):
+ """Generic handler for RPC calls to the scheduler.
+
+ :param params: Optional dictionary of arguments to be passed to the
+ scheduler worker
+
+ :retval: Result returned by scheduler worker
+ """
+ if not params:
+ params = {}
+ queue = FLAGS.scheduler_topic
+ kwargs = {'method': method, 'args': params}
+ return rpc.call(context, queue, kwargs)
+
+
+def get_zone_list(context):
+ """Return a list of zones assoicated with this zone."""
+ items = _call_scheduler('get_zone_list', context)
+ for item in items:
+ item['api_url'] = item['api_url'].replace('\\/', '/')
+ if not items:
+ items = db.zone_get_all(context)
+ return items
+
+
+def zone_get(context, zone_id):
+ return db.zone_get(context, zone_id)
+
+
+def zone_delete(context, zone_id):
+ return db.zone_delete(context, zone_id)
+
+
+def zone_create(context, data):
+ return db.zone_create(context, data)
+
+
+def zone_update(context, zone_id, data):
+ return db.zone_update(context, zone_id, data)
+
+
+def get_zone_capabilities(context, service=None):
+ """Returns a dict of key, value capabilities for this zone,
+ or for a particular class of services running in this zone."""
+ return _call_scheduler('get_zone_capabilities', context=context,
+ params=dict(service=service))
+
+
+def update_service_capabilities(context, service_name, host, capabilities):
+ """Send an update to all the scheduler services informing them
+ of the capabilities of this service."""
+ kwargs = dict(method='update_service_capabilities',
+ args=dict(service_name=service_name, host=host,
+ capabilities=capabilities))
+ return rpc.fanout_cast(context, 'scheduler', kwargs)
+
+
+def _wrap_method(function, self):
+ """Wrap method to supply self."""
+ def _wrap(*args, **kwargs):
+ return function(self, *args, **kwargs)
+ return _wrap
+
+
+def _process(func, zone):
+ """Worker stub for green thread pool. Give the worker
+ an authenticated nova client and zone info."""
+ nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ nova.authenticate()
+ return func(nova, zone)
+
+
+def child_zone_helper(zone_list, func):
+ """Fire off a command to each zone in the list.
+ The return value is a list of the novaclient return objects from
+ each child zone. For example, if you are calling server.pause(),
+ the list will contain whatever server.pause() returned, one entry
+ per child zone called."""
+ green_pool = greenpool.GreenPool()
+ return [result for result in green_pool.imap(
+ _wrap_method(_process, func), zone_list)]
+
+
+def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
+ """Use novaclient to issue command to a single child zone.
+ One of these will be run in parallel for each child zone."""
+ manager = getattr(nova, collection)
+ result = None
+ try:
+ try:
+ result = manager.get(int(item_id))
+ except ValueError, e:
+ result = manager.find(name=item_id)
+ except novaclient.NotFound:
+ url = zone.api_url
+ LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'") %
+ locals())
+ return None
+
+ if method_name.lower() not in ['get', 'find']:
+ result = getattr(result, method_name)()
+ return result
+
+
+def wrap_novaclient_function(f, collection, method_name, item_id):
+ """Appends collection, method_name and item_id to the incoming
+ (nova, zone) call from child_zone_helper."""
+ def inner(nova, zone):
+ return f(nova, zone, collection, method_name, item_id)
+
+ return inner
+
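A sketch of how the two helpers above combine (the collection, method and
id values are hypothetical; reroute_compute below uses the same pattern):

    zones = db.zone_get_all(context)
    results = child_zone_helper(zones,
        wrap_novaclient_function(_issue_novaclient_command,
                                 'servers', 'pause', 42))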
+
+class RedirectResult(exception.Error):
+ """Used to the HTTP API know that these results are pre-cooked
+ and they can be returned to the caller directly."""
+ def __init__(self, results):
+ self.results = results
+ super(RedirectResult, self).__init__(
+ message=_("Uncaught Zone redirection exception"))
+
+
+class reroute_compute(object):
+ """Decorator used to indicate that the method should
+ delegate the call the child zones if the db query
+ can't find anything."""
+ def __init__(self, method_name):
+ self.method_name = method_name
+
+ def __call__(self, f):
+ def wrapped_f(*args, **kwargs):
+ collection, context, item_id = \
+ self.get_collection_context_and_id(args, kwargs)
+ try:
+ # Call the original function ...
+ return f(*args, **kwargs)
+ except exception.InstanceNotFound, e:
+ LOG.debug(_("Instance %(item_id)s not found "
+ "locally: '%(e)s'") % locals())
+
+ if not FLAGS.enable_zone_routing:
+ raise
+
+ zones = db.zone_get_all(context)
+ if not zones:
+ raise
+
+ # Ask the children to provide an answer ...
+ LOG.debug(_("Asking child zones ..."))
+ result = self._call_child_zones(zones,
+ wrap_novaclient_function(_issue_novaclient_command,
+ collection, self.method_name, item_id))
+ # Scrub the results and raise another exception
+ # so the API layers can bail out gracefully ...
+ raise RedirectResult(self.unmarshall_result(result))
+ return wrapped_f
+
+ def _call_child_zones(self, zones, function):
+ """Ask the child zones to perform this operation.
+ Broken out for testing."""
+ return child_zone_helper(zones, function)
+
+ def get_collection_context_and_id(self, args, kwargs):
+ """Returns a tuple of (novaclient collection name, security
+ context and resource id. Derived class should override this."""
+ context = kwargs.get('context', None)
+ instance_id = kwargs.get('instance_id', None)
+ if len(args) > 1 and not context:
+ context = args[1]
+ if len(args) > 2 and not instance_id:
+ instance_id = args[2]
+ return ("servers", context, instance_id)
+
+ def unmarshall_result(self, zone_responses):
+ """Result is a list of responses from each child zone.
+ Each decorator derivation is responsible for turning this
+ into a format expected by the calling method. For
+ example, this one is expected to return a single Server
+ dict {'server': {k: v}}. Others may return a list of them, like
+ {'servers': [{k: v}]}."""
+ reduced_response = []
+ for zone_response in zone_responses:
+ if not zone_response:
+ continue
+
+ server = zone_response.__dict__
+
+ for k in server.keys():
+ if k[0] == '_' or k == 'manager':
+ del server[k]
+
+ reduced_response.append(dict(server=server))
+ if reduced_response:
+ return reduced_response[0] # first for now.
+ return {}
+
+
+def redirect_handler(f):
+ def new_f(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except RedirectResult, e:
+ return e.results
+ return new_f
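A hedged sketch of how the decorator above is meant to be used (the class
and method here are hypothetical, but the positional signature matches what
get_collection_context_and_id() expects). A local InstanceNotFound triggers
the child-zone reroute; the web-facing caller would be wrapped with
redirect_handler to unpack RedirectResult.results:

    class ComputeProxy(object):
        @reroute_compute("pause")
        def pause(self, context, instance_id):
            # Raises exception.InstanceNotFound for non-local instances,
            # which triggers the child-zone reroute above and ultimately
            # calls servers.pause() in each child zone via novaclient.
            return db.instance_get(context, instance_id)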
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 9deaa2777..f4461cee2 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -34,5 +34,7 @@ class ChanceScheduler(driver.Scheduler):
hosts = self.hosts_up(context, topic)
if not hosts:
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
return hosts[int(random.random() * len(hosts))]
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 66e46c1b9..ce05d9f6a 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -26,10 +26,14 @@ import datetime
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova.compute import power_state
FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
'maximum time since last checkin for up service')
+flags.DECLARE('instances_path', 'nova.compute.manager')
class NoValidHost(exception.Error):
@@ -45,6 +49,13 @@ class WillNotSchedule(exception.Error):
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""
+ def __init__(self):
+ self.zone_manager = None
+
+ def set_zone_manager(self, zone_manager):
+ """Called by the Scheduler Service to supply a ZoneManager."""
+ self.zone_manager = zone_manager
+
@staticmethod
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
@@ -64,3 +75,236 @@ class Scheduler(object):
def schedule(self, context, topic, *_args, **_kwargs):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
+
+ def schedule_live_migration(self, context, instance_id, dest):
+ """Live migration scheduling method.
+
+ :param context: security context
+ :param instance_id: id of the instance to migrate
+ :param dest: destination host
+ :return:
+ The host where the instance is currently running.
+ The scheduler then sends the request to that host.
+
+ """
+
+ # Get the instance (raises if it does not exist).
+ instance_ref = db.instance_get(context, instance_id)
+
+ # Checking instance.
+ self._live_migration_src_check(context, instance_ref)
+
+ # Checking destination host.
+ self._live_migration_dest_check(context, instance_ref, dest)
+
+ # Common checking.
+ self._live_migration_common_check(context, instance_ref, dest)
+
+ # Changing instance_state.
+ db.instance_set_state(context,
+ instance_id,
+ power_state.PAUSED,
+ 'migrating')
+
+ # Changing volume state
+ for volume_ref in instance_ref['volumes']:
+ db.volume_update(context,
+ volume_ref['id'],
+ {'status': 'migrating'})
+
+ # The return value is needed so the request can be sent to the
+ # src host. See _schedule() for details.
+ src = instance_ref['host']
+ return src
+
+ def _live_migration_src_check(self, context, instance_ref):
+ """Live migration check routine (for src host).
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+
+ # Checking instance is running.
+ if (power_state.RUNNING != instance_ref['state'] or \
+ 'running' != instance_ref['state_description']):
+ ec2_id = instance_ref['hostname']
+ raise exception.Invalid(_('Instance(%s) is not running') % ec2_id)
+
+ # Checking volume node is running when any volumes are mounted
+ # to the instance.
+ if len(instance_ref['volumes']) != 0:
+ services = db.service_get_all_by_topic(context, 'volume')
+ if len(services) < 1 or not self.service_is_up(services[0]):
+ raise exception.Invalid(_("volume node is not alive "
+ "(time synchronization problem?)"))
+
+ # Checking src host exists and is a compute node
+ src = instance_ref['host']
+ services = db.service_get_all_compute_by_host(context, src)
+
+ # Checking src host is alive.
+ if not self.service_is_up(services[0]):
+ raise exception.Invalid(_("%s is not alive (time "
+ "synchronization problem?)") % src)
+
+ def _live_migration_dest_check(self, context, instance_ref, dest):
+ """Live migration check routine (for destination host).
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ # Checking dest host exists and is a compute node.
+ dservice_refs = db.service_get_all_compute_by_host(context, dest)
+ dservice_ref = dservice_refs[0]
+
+ # Checking dest host is alive.
+ if not self.service_is_up(dservice_ref):
+ raise exception.Invalid(_("%s is not alive (time "
+ "synchronization problem?)") % dest)
+
+ # Checking that dest is not the host where the instance
+ # is currently running.
+ src = instance_ref['host']
+ if dest == src:
+ ec2_id = instance_ref['hostname']
+ raise exception.Invalid(_("%(dest)s is where %(ec2_id)s is "
+ "running now. choose other host.")
+ % locals())
+
+ # Checking dest host still has enough capacity.
+ self.assert_compute_node_has_enough_resources(context,
+ instance_ref,
+ dest)
+
+ def _live_migration_common_check(self, context, instance_ref, dest):
+ """Live migration common check routine.
+
+ The checks below follow
+ http://wiki.libvirt.org/page/TodoPreMigrationChecks
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ # Checking shared storage connectivity
+ self.mounted_on_same_shared_storage(context, instance_ref, dest)
+
+ # Checking dest exists.
+ dservice_refs = db.service_get_all_compute_by_host(context, dest)
+ dservice_ref = dservice_refs[0]['compute_node'][0]
+
+ # Checking original host (where the instance was launched) exists.
+ try:
+ oservice_refs = db.service_get_all_compute_by_host(context,
+ instance_ref['launched_on'])
+ except exception.NotFound:
+ raise exception.Invalid(_("host %s where instance was launched "
+ "does not exist.")
+ % instance_ref['launched_on'])
+ oservice_ref = oservice_refs[0]['compute_node'][0]
+
+ # Checking hypervisor is same.
+ orig_hypervisor = oservice_ref['hypervisor_type']
+ dest_hypervisor = dservice_ref['hypervisor_type']
+ if orig_hypervisor != dest_hypervisor:
+ raise exception.Invalid(_("Different hypervisor type "
+ "(%(orig_hypervisor)s->"
+ "%(dest_hypervisor)s)") % locals())
+
+ # Checking hypervisor version.
+ orig_hypervisor = oservice_ref['hypervisor_version']
+ dest_hypervisor = dservice_ref['hypervisor_version']
+ if orig_hypervisor > dest_hypervisor:
+ raise exception.Invalid(_("Older hypervisor version "
+ "(%(orig_hypervisor)s->"
+ "%(dest_hypervisor)s)") % locals())
+
+ # Checking cpuinfo.
+ try:
+ rpc.call(context,
+ db.queue_get_for(context, FLAGS.compute_topic, dest),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': oservice_ref['cpu_info']}})
+
+ except rpc.RemoteError:
+ src = instance_ref['host']
+ logging.exception(_("host %(dest)s is not compatible with "
+ "original host %(src)s.") % locals())
+ raise
+
+ def assert_compute_node_has_enough_resources(self, context,
+ instance_ref, dest):
+ """Checks if destination host has enough resource for live migration.
+
+ Currently, only memory checking has been done.
+ If storage migration (block migration, meaning live migration
+ without any shared storage) becomes available, local storage
+ checking will also be necessary.
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ # Getting instance information
+ ec2_id = instance_ref['hostname']
+
+ # Getting host information
+ service_refs = db.service_get_all_compute_by_host(context, dest)
+ compute_node_ref = service_refs[0]['compute_node'][0]
+
+ mem_total = int(compute_node_ref['memory_mb'])
+ mem_used = int(compute_node_ref['memory_mb_used'])
+ mem_avail = mem_total - mem_used
+ mem_inst = instance_ref['memory_mb']
+ if mem_avail <= mem_inst:
+ raise exception.NotEmpty(_("Unable to migrate %(ec2_id)s "
+ "to destination: %(dest)s "
+ "(host:%(mem_avail)s "
+ "<= instance:%(mem_inst)s)")
+ % locals())
+
+ def mounted_on_same_shared_storage(self, context, instance_ref, dest):
+ """Check if the src and dest host mount same shared storage.
+
+ At first, dest host creates temp file, and src host can see
+ it if they mounts same shared storage. Then src host erase it.
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ src = instance_ref['host']
+ dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
+ src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
+
+ try:
+ # create tmpfile at dest host
+ filename = rpc.call(context, dst_t,
+ {"method": 'create_shared_storage_test_file'})
+
+ # make sure the file is visible from the src host.
+ rpc.call(context, src_t,
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': filename}})
+
+ except rpc.RemoteError:
+ ipath = FLAGS.instances_path
+ logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
+ "same shared storage between %(src)s "
+ "and %(dest)s.") % locals())
+ raise
+
+ finally:
+ rpc.call(context, dst_t,
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': filename}})
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index e9b47512e..7d62cfc4e 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -29,6 +29,7 @@ from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
+from nova.scheduler import zone_manager
LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
@@ -40,15 +41,36 @@ flags.DEFINE_string('scheduler_driver',
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
def __init__(self, scheduler_driver=None, *args, **kwargs):
+ self.zone_manager = zone_manager.ZoneManager()
if not scheduler_driver:
scheduler_driver = FLAGS.scheduler_driver
self.driver = utils.import_object(scheduler_driver)
+ self.driver.set_zone_manager(self.zone_manager)
super(SchedulerManager, self).__init__(*args, **kwargs)
def __getattr__(self, key):
"""Converts all method calls to use the schedule method"""
return functools.partial(self._schedule, key)
+ def periodic_tasks(self, context=None):
+ """Poll child zones periodically to get status."""
+ self.zone_manager.ping(context)
+
+ def get_zone_list(self, context=None):
+ """Get a list of zones from the ZoneManager."""
+ return self.zone_manager.get_zone_list()
+
+ def get_zone_capabilities(self, context=None, service=None):
+ """Get the normalized set of capabilites for this zone,
+ or for a particular service."""
+ return self.zone_manager.get_zone_capabilities(context, service)
+
+ def update_service_capabilities(self, context=None, service_name=None,
+ host=None, capabilities={}):
+ """Process a capability update from a service node."""
+ self.zone_manager.update_service_capabilities(service_name,
+ host, capabilities)
+
def _schedule(self, method, context, topic, *args, **kwargs):
"""Tries to call schedule_* method on the driver to retrieve host.
@@ -67,3 +89,55 @@ class SchedulerManager(manager.Manager):
{"method": method,
"args": kwargs})
LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
+
+ # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
+ # Based on bexar design summit discussion,
+ # just put this here for bexar release.
+ def show_host_resources(self, context, host, *args):
+ """Shows the physical/usage resource given by hosts.
+
+ :param context: security context
+ :param host: hostname
+ :returns:
+ example format is below.
+ {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
+ D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048}
+
+ """
+
+ compute_ref = db.service_get_all_compute_by_host(context, host)
+ compute_ref = compute_ref[0]
+
+ # Getting physical resource information
+ compute_node_ref = compute_ref['compute_node'][0]
+ resource = {'vcpus': compute_node_ref['vcpus'],
+ 'memory_mb': compute_node_ref['memory_mb'],
+ 'local_gb': compute_node_ref['local_gb'],
+ 'vcpus_used': compute_node_ref['vcpus_used'],
+ 'memory_mb_used': compute_node_ref['memory_mb_used'],
+ 'local_gb_used': compute_node_ref['local_gb_used']}
+
+ # Getting usage resource information
+ usage = {}
+ instance_refs = db.instance_get_all_by_host(context,
+ compute_ref['host'])
+ if not instance_refs:
+ return {'resource': resource, 'usage': usage}
+
+ project_ids = [i['project_id'] for i in instance_refs]
+ project_ids = list(set(project_ids))
+ for project_id in project_ids:
+ vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
+ host,
+ project_id)
+ mem = db.instance_get_memory_sum_by_host_and_project(context,
+ host,
+ project_id)
+ hdd = db.instance_get_disk_sum_by_host_and_project(context,
+ host,
+ project_id)
+ usage[project_id] = {'vcpus': int(vcpus),
+ 'memory_mb': int(mem),
+ 'local_gb': int(hdd)}
+
+ return {'resource': resource, 'usage': usage}
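An illustrative message flow for the __getattr__/_schedule plumbing above
(the exact argument names in the cast are an assumption beyond what this
hunk shows):

    # A hypothetical cast to the scheduler topic ...
    rpc.cast(context, FLAGS.scheduler_topic,
             {'method': 'create_volume',
              'args': {'topic': 'volume', 'volume_id': 42}})
    # ... is routed through SchedulerManager.__getattr__ to
    # _schedule('create_volume', context, 'volume', volume_id=42), which
    # asks the driver's schedule_create_volume() for a host and re-casts
    # the same method to the 'volume.<host>' queue.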
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 0191ceb3d..dd568d2c6 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -72,7 +72,9 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
@@ -107,7 +109,9 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
def schedule_set_network_host(self, context, *_args, **_kwargs):
"""Picks a host that is up and has the fewest networks."""
@@ -119,4 +123,6 @@ class SimpleScheduler(chance.ChanceScheduler):
raise driver.NoValidHost(_("All hosts have too many networks"))
if self.service_is_up(service):
return service['host']
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
diff --git a/nova/scheduler/zone.py b/nova/scheduler/zone.py
index 49786cd32..44d5a166f 100644
--- a/nova/scheduler/zone.py
+++ b/nova/scheduler/zone.py
@@ -52,5 +52,8 @@ class ZoneScheduler(driver.Scheduler):
zone = _kwargs.get('availability_zone')
hosts = self.hosts_up_with_zone(context, topic, zone)
if not hosts:
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
+
return hosts[int(random.random() * len(hosts))]
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
new file mode 100644
index 000000000..198f9d4cc
--- /dev/null
+++ b/nova/scheduler/zone_manager.py
@@ -0,0 +1,176 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+ZoneManager oversees all communications with child Zones.
+"""
+
+import novaclient
+import thread
+import traceback
+
+from datetime import datetime
+from eventlet import greenpool
+
+from nova import db
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('zone_db_check_interval', 60,
+ 'Seconds between getting fresh zone info from db.')
+flags.DEFINE_integer('zone_failures_to_offline', 3,
+ 'Number of consecutive errors before marking zone offline')
+
+
+class ZoneState(object):
+ """Holds the state of all connected child zones."""
+ def __init__(self):
+ self.is_active = True
+ self.name = None
+ self.capabilities = None
+ self.attempt = 0
+ self.last_seen = datetime.min
+ self.last_exception = None
+ self.last_exception_time = None
+
+ def update_credentials(self, zone):
+ """Update zone credentials from db"""
+ self.zone_id = zone.id
+ self.api_url = zone.api_url
+ self.username = zone.username
+ self.password = zone.password
+
+ def update_metadata(self, zone_metadata):
+ """Update zone metadata after successful communications with
+ child zone."""
+ self.last_seen = datetime.now()
+ self.attempt = 0
+ self.name = zone_metadata.get("name", "n/a")
+ self.capabilities = ", ".join(["%s=%s" % (k, v)
+ for k, v in zone_metadata.iteritems() if k != 'name'])
+ self.is_active = True
+
+ def to_dict(self):
+ return dict(name=self.name, capabilities=self.capabilities,
+ is_active=self.is_active, api_url=self.api_url,
+ id=self.zone_id)
+
+ def log_error(self, exception):
+ """Something went wrong. Check to see if zone should be
+ marked as offline."""
+ self.last_exception = exception
+ self.last_exception_time = datetime.now()
+ api_url = self.api_url
+ logging.warning(_("'%(exception)s' error talking to "
+ "zone %(api_url)s") % locals())
+
+ max_errors = FLAGS.zone_failures_to_offline
+ self.attempt += 1
+ if self.attempt >= max_errors:
+ self.is_active = False
+ logging.error(_("No answer from zone %(api_url)s "
+ "after %(max_errors)d "
+ "attempts. Marking inactive.") % locals())
+
+
+def _call_novaclient(zone):
+ """Call novaclient. Broken out for testing purposes."""
+ client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ return client.zones.info()._info
+
+
+def _poll_zone(zone):
+ """Eventlet worker to poll a zone."""
+ logging.debug(_("Polling zone: %s") % zone.api_url)
+ try:
+ zone.update_metadata(_call_novaclient(zone))
+ except Exception, e:
+ zone.log_error(traceback.format_exc())
+
+
+class ZoneManager(object):
+ """Keeps the zone states updated."""
+ def __init__(self):
+ self.last_zone_db_check = datetime.min
+ self.zone_states = {} # { <zone_id> : ZoneState }
+ self.service_states = {} # { <service> : { <host> : { cap k : v }}}
+ self.green_pool = greenpool.GreenPool()
+
+ def get_zone_list(self):
+ """Return the list of zones we know about."""
+ return [zone.to_dict() for zone in self.zone_states.values()]
+
+ def get_zone_capabilities(self, context, service=None):
+ """Roll up all the individual host info to generic 'service'
+ capabilities. Each capability is aggregated into
+ <cap>_min and <cap>_max values."""
+ service_dict = self.service_states
+ if service:
+ service_dict = {service: self.service_states.get(service, {})}
+
+ # TODO(sandy) - be smarter about fabricating this structure.
+ # But it's likely to change once we understand what the Best-Match
+ # code will need better.
+ combined = {} # { <service>_<cap> : (min, max), ... }
+ for service_name, host_dict in service_dict.iteritems():
+ for host, caps_dict in host_dict.iteritems():
+ for cap, value in caps_dict.iteritems():
+ key = "%s_%s" % (service_name, cap)
+ min_value, max_value = combined.get(key, (value, value))
+ min_value = min(min_value, value)
+ max_value = max(max_value, value)
+ combined[key] = (min_value, max_value)
+
+ return combined
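A worked example of the aggregation above (service, host and numbers are
made up):

    zm = ZoneManager()
    zm.update_service_capabilities('compute', 'host1', {'free_ram_mb': 512})
    zm.update_service_capabilities('compute', 'host2', {'free_ram_mb': 2048})
    # Min/max across hosts, keyed by <service>_<capability>:
    assert zm.get_zone_capabilities(None) == {'compute_free_ram_mb': (512, 2048)}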
+
+ def _refresh_from_db(self, context):
+ """Make our zone state map match the db."""
+ # Add/update existing zones ...
+ zones = db.zone_get_all(context)
+ existing = self.zone_states.keys()
+ db_keys = []
+ for zone in zones:
+ db_keys.append(zone.id)
+ if zone.id not in existing:
+ self.zone_states[zone.id] = ZoneState()
+ self.zone_states[zone.id].update_credentials(zone)
+
+ # Cleanup zones removed from db ...
+ keys = self.zone_states.keys() # since we're deleting
+ for zone_id in keys:
+ if zone_id not in db_keys:
+ del self.zone_states[zone_id]
+
+ def _poll_zones(self, context):
+ """Try to connect to each child zone and get update."""
+ self.green_pool.imap(_poll_zone, self.zone_states.values())
+
+ def ping(self, context=None):
+ """Ping should be called periodically to update zone status."""
+ diff = datetime.now() - self.last_zone_db_check
+ if diff.seconds >= FLAGS.zone_db_check_interval:
+ logging.debug(_("Updating zone cache from db."))
+ self.last_zone_db_check = datetime.now()
+ self._refresh_from_db(context)
+ self._poll_zones(context)
+
+ def update_service_capabilities(self, service_name, host, capabilities):
+ """Update the per-service capabilities based on this notification."""
+ logging.debug(_("Received %(service_name)s service update from "
+ "%(host)s: %(capabilities)s") % locals())
+ service_caps = self.service_states.get(service_name, {})
+ service_caps[host] = capabilities
+ self.service_states[service_name] = service_caps
diff --git a/nova/service.py b/nova/service.py
index f47358089..47c0b96c0 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -39,6 +40,7 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import version
+from nova import wsgi
FLAGS = flags.FLAGS
@@ -48,6 +50,14 @@ flags.DEFINE_integer('report_interval', 10,
flags.DEFINE_integer('periodic_interval', 60,
'seconds between running periodic tasks',
lower_bound=1)
+flags.DEFINE_string('ec2_listen', "0.0.0.0",
+ 'IP address for EC2 API to listen')
+flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
+flags.DEFINE_string('osapi_listen', "0.0.0.0",
+ 'IP address for OpenStack API to listen')
+flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
+flags.DEFINE_string('api_paste_config', "api-paste.ini",
+ 'File name for the paste.deploy config for nova-api')
class Service(object):
@@ -82,20 +92,29 @@ class Service(object):
except exception.NotFound:
self._create_service_ref(ctxt)
+ if 'nova-compute' == self.binary:
+ self.manager.update_available_resource(ctxt)
+
conn1 = rpc.Connection.instance(new=True)
conn2 = rpc.Connection.instance(new=True)
+ conn3 = rpc.Connection.instance(new=True)
if self.report_interval:
- consumer_all = rpc.AdapterConsumer(
+ consumer_all = rpc.TopicAdapterConsumer(
connection=conn1,
topic=self.topic,
proxy=self)
- consumer_node = rpc.AdapterConsumer(
+ consumer_node = rpc.TopicAdapterConsumer(
connection=conn2,
topic='%s.%s' % (self.topic, self.host),
proxy=self)
+ fanout = rpc.FanoutAdapterConsumer(
+ connection=conn3,
+ topic=self.topic,
+ proxy=self)
self.timers.append(consumer_all.attach_to_eventlet())
self.timers.append(consumer_node.attach_to_eventlet())
+ self.timers.append(fanout.attach_to_eventlet())
pulse = utils.LoopingCall(self.report_state)
pulse.start(interval=self.report_interval, now=False)
@@ -204,12 +223,47 @@ class Service(object):
logging.error(_("Recovered model server connection!"))
# TODO(vish): this should probably only catch connection errors
- except Exception: # pylint: disable-msg=W0702
+ except Exception: # pylint: disable=W0702
if not getattr(self, "model_disconnected", False):
self.model_disconnected = True
logging.exception(_("model server went away"))
+class WsgiService(object):
+ """Base class for WSGI based services.
+
+ For each api you define, you must also define these flags:
+ :<api>_listen: The address on which to listen
+ :<api>_listen_port: The port on which to listen
+ """
+
+ def __init__(self, conf, apis):
+ self.conf = conf
+ self.apis = apis
+ self.wsgi_app = None
+
+ def start(self):
+ self.wsgi_app = _run_wsgi(self.conf, self.apis)
+
+ def wait(self):
+ self.wsgi_app.wait()
+
+
+class ApiService(WsgiService):
+ """Class for our nova-api service"""
+ @classmethod
+ def create(cls, conf=None):
+ if not conf:
+ conf = wsgi.paste_config_file(FLAGS.api_paste_config)
+ if not conf:
+ message = (_("No paste configuration found for: %s")
+ % FLAGS.api_paste_config)
+ raise exception.Error(message)
+ api_endpoints = ['ec2', 'osapi']
+ service = cls(conf, api_endpoints)
+ return service
+
+
def serve(*services):
try:
if not services:
@@ -239,3 +293,46 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
+
+
+def serve_wsgi(cls, conf=None):
+ try:
+ service = cls.create(conf)
+ except Exception:
+ logging.exception('in WsgiService.create()')
+ raise
+ finally:
+ # After we've loaded up all our dynamic bits, check
+ # whether we should print help
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
+
+ service.start()
+
+ return service
+
+
+def _run_wsgi(paste_config_file, apis):
+ logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
+ apps = []
+ for api in apis:
+ config = wsgi.load_paste_configuration(paste_config_file, api)
+ if config is None:
+ logging.debug(_("No paste configuration for app: %s"), api)
+ continue
+ logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
+ logging.info(_("Running %s API"), api)
+ app = wsgi.load_paste_app(paste_config_file, api)
+ apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
+ getattr(FLAGS, "%s_listen" % api)))
+ if len(apps) == 0:
+ logging.error(_("No known API applications configured in %s."),
+ paste_config_file)
+ return
+
+ server = wsgi.Server()
+ for app in apps:
+ server.start(*app)
+ return server
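A minimal sketch of how a binary such as bin/nova-api is expected to use
these helpers (an assumption about the caller, not part of this patch; flag
parsing happens inside serve_wsgi):

    from nova import service
    server = service.serve_wsgi(service.ApiService)
    server.wait()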
diff --git a/nova/test.py b/nova/test.py
index d8a47464f..3b608520a 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -24,6 +24,7 @@ and some black magic for inline callbacks.
import datetime
+import functools
import os
import shutil
import uuid
@@ -32,6 +33,7 @@ import unittest
import mox
import shutil
import stubout
+from eventlet import greenthread
from nova import context
from nova import db
@@ -39,6 +41,7 @@ from nova import fakerabbit
from nova import flags
from nova import rpc
from nova import service
+from nova import wsgi
FLAGS = flags.FLAGS
@@ -79,6 +82,7 @@ class TestCase(unittest.TestCase):
self.injected = []
self._services = []
self._monkey_patch_attach()
+ self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
def tearDown(self):
@@ -99,7 +103,8 @@ class TestCase(unittest.TestCase):
self.reset_flags()
# Reset our monkey-patches
- rpc.Consumer.attach_to_eventlet = self.originalAttach
+ rpc.Consumer.attach_to_eventlet = self.original_attach
+ wsgi.Server.start = self.original_start
# Stop any timers
for x in self.injected:
@@ -141,12 +146,90 @@ class TestCase(unittest.TestCase):
return svc
def _monkey_patch_attach(self):
- self.originalAttach = rpc.Consumer.attach_to_eventlet
+ self.original_attach = rpc.Consumer.attach_to_eventlet
- def _wrapped(innerSelf):
- rv = self.originalAttach(innerSelf)
+ def _wrapped(inner_self):
+ rv = self.original_attach(inner_self)
self.injected.append(rv)
return rv
- _wrapped.func_name = self.originalAttach.func_name
+ _wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
+
+ def _monkey_patch_wsgi(self):
+ """Allow us to kill servers spawned by wsgi.Server."""
+ # TODO(termie): change these patterns to use functools
+ self.original_start = wsgi.Server.start
+
+ @functools.wraps(self.original_start)
+ def _wrapped_start(inner_self, *args, **kwargs):
+ original_spawn_n = inner_self.pool.spawn_n
+
+ @functools.wraps(original_spawn_n)
+ def _wrapped_spawn_n(*args, **kwargs):
+ rv = greenthread.spawn(*args, **kwargs)
+ self._services.append(rv)
+
+ inner_self.pool.spawn_n = _wrapped_spawn_n
+ self.original_start(inner_self, *args, **kwargs)
+ inner_self.pool.spawn_n = original_spawn_n
+
+ _wrapped_start.func_name = self.original_start.func_name
+ wsgi.Server.start = _wrapped_start
+
+ # Useful assertions
+ def assertDictMatch(self, d1, d2):
+ """Assert two dicts are equivalent.
+
+ This is a 'deep' match in the sense that it handles nested
+ dictionaries appropriately.
+
+ NOTE:
+
+ If you don't care about (or don't know) a given value, you can
+ specify the string DONTCARE as that value, and the comparison for
+ that dict item will be skipped.
+ """
+ def raise_assertion(msg):
+ d1str = str(d1)
+ d2str = str(d2)
+ base_msg = ("Dictionaries do not match. %(msg)s d1: %(d1str)s "
+ "d2: %(d2str)s" % locals())
+ raise AssertionError(base_msg)
+
+ d1keys = set(d1.keys())
+ d2keys = set(d2.keys())
+ if d1keys != d2keys:
+ d1only = d1keys - d2keys
+ d2only = d2keys - d1keys
+ raise_assertion("Keys in d1 and not d2: %(d1only)s. "
+ "Keys in d2 and not d1: %(d2only)s" % locals())
+
+ for key in d1keys:
+ d1value = d1[key]
+ d2value = d2[key]
+ if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
+ self.assertDictMatch(d1value, d2value)
+ elif 'DONTCARE' in (d1value, d2value):
+ continue
+ elif d1value != d2value:
+ raise_assertion("d1['%(key)s']=%(d1value)s != "
+ "d2['%(key)s']=%(d2value)s" % locals())
+
+ def assertDictListMatch(self, L1, L2):
+ """Assert a list of dicts are equivalent"""
+ def raise_assertion(msg):
+ L1str = str(L1)
+ L2str = str(L2)
+ base_msg = ("List of dictionaries do not match: %(msg)s "
+ "L1: %(L1str)s L2: %(L2str)s" % locals())
+ raise AssertionError(base_msg)
+
+ L1count = len(L1)
+ L2count = len(L2)
+ if L1count != L2count:
+ raise_assertion("Length mismatch: len(L1)=%(L1count)d != "
+ "len(L2)=%(L2count)d" % locals())
+
+ for d1, d2 in zip(L1, L2):
+ self.assertDictMatch(d1, d2)
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index e18120285..bac7181f7 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -20,7 +20,7 @@ from nova import test
from nova import context
from nova import flags
-from nova.api.openstack.ratelimiting import RateLimitingMiddleware
+from nova.api.openstack.limits import RateLimitingMiddleware
from nova.api.openstack.common import limited
from nova.tests.api.openstack import fakes
from webob import Request
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py
new file mode 100644
index 000000000..74bb8729a
--- /dev/null
+++ b/nova/tests/api/openstack/common.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import webob
+
+
+def webob_factory(url):
+ """Factory for removing duplicate webob code from tests"""
+
+ base_url = url
+
+ def web_request(url, method=None, body=None):
+ req = webob.Request.blank("%s%s" % (base_url, url))
+ if method:
+ req.content_type = "application/json"
+ req.method = method
+ if body:
+ req.body = json.dumps(body)
+ return req
+ return web_request
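+
+
+# Example usage (illustrative):
+#
+#     make_request = webob_factory('/v1.1/servers')
+#     req = make_request('/detail', method='GET')
+#     post = make_request('', method='POST', body={'server': {}})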
diff --git a/nova/tests/api/openstack/extensions/__init__.py b/nova/tests/api/openstack/extensions/__init__.py
new file mode 100644
index 000000000..848908a95
--- /dev/null
+++ b/nova/tests/api/openstack/extensions/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py
new file mode 100644
index 000000000..0860b51ac
--- /dev/null
+++ b/nova/tests/api/openstack/extensions/foxinsocks.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from nova import wsgi
+
+from nova.api.openstack import extensions
+
+
+class FoxInSocksController(wsgi.Controller):
+
+ def index(self, req):
+ return "Try to say this Mr. Knox, sir..."
+
+
+class Foxinsocks(object):
+
+ def __init__(self):
+ pass
+
+ def get_name(self):
+ return "Fox In Socks"
+
+ def get_alias(self):
+ return "FOXNSOX"
+
+ def get_description(self):
+ return "The Fox In Socks Extension"
+
+ def get_namespace(self):
+ return "http://www.fox.in.socks/api/ext/pie/v1.0"
+
+ def get_updated(self):
+ return "2011-01-22T13:25:27-06:00"
+
+ def get_resources(self):
+ resources = []
+ resource = extensions.ResourceExtension('foxnsocks',
+ FoxInSocksController())
+ resources.append(resource)
+ return resources
+
+ def get_actions(self):
+ actions = []
+ actions.append(extensions.ActionExtension('servers', 'add_tweedle',
+ self._add_tweedle))
+ actions.append(extensions.ActionExtension('servers', 'delete_tweedle',
+ self._delete_tweedle))
+ return actions
+
+ def get_response_extensions(self):
+ response_exts = []
+
+ def _goose_handler(res):
+ #NOTE: This only handles JSON responses.
+ # You can use content type header to test for XML.
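+ # (For an XML response one could, e.g., branch on res.content_type
+ # instead of assuming JSON here.)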
+ data = json.loads(res.body)
+ data['flavor']['googoose'] = "Gooey goo for chewy chewing!"
+ return data
+
+ resp_ext = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)',
+ _goose_handler)
+ response_exts.append(resp_ext)
+
+ def _bands_handler(res):
+ #NOTE: This only handles JSON responses.
+ # You can use content type header to test for XML.
+ data = json.loads(res.body)
+ data['big_bands'] = 'Pig Bands!'
+ return data
+
+ resp_ext2 = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)',
+ _bands_handler)
+ response_exts.append(resp_ext2)
+ return response_exts
+
+ def _add_tweedle(self, input_dict, req, id):
+
+ return "Tweedle Beetle Added."
+
+ def _delete_tweedle(self, input_dict, req, id):
+
+ return "Tweedle Beetle Deleted."
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index fb282f1c9..8b0729c35 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import datetime
import json
import random
@@ -25,8 +26,8 @@ import webob.dec
from paste import urlmap
from glance import client as glance_client
+from glance.common import exception as glance_exc
-from nova import auth
from nova import context
from nova import exception as exc
from nova import flags
@@ -34,7 +35,9 @@ from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
-from nova.api.openstack import ratelimiting
+from nova.api.openstack import versions
+from nova.api.openstack import limits
+from nova.auth.manager import User, Project
from nova.image import glance
from nova.image import local
from nova.image import service
@@ -68,26 +71,36 @@ def fake_auth_init(self, application):
@webob.dec.wsgify
def fake_wsgi(self, req):
req.environ['nova.context'] = context.RequestContext(1, 1)
- if req.body:
- req.environ['inst_dict'] = json.loads(req.body)
return self.application
-def wsgi_app(inner_application=None):
- if not inner_application:
- inner_application = openstack.APIRouter()
+def wsgi_app(inner_app10=None, inner_app11=None):
+ if not inner_app10:
+ inner_app10 = openstack.APIRouterV10()
+ if not inner_app11:
+ inner_app11 = openstack.APIRouterV11()
mapper = urlmap.URLMap()
- api = openstack.FaultWrapper(auth.AuthMiddleware(
- ratelimiting.RateLimitingMiddleware(inner_application)))
- mapper['/v1.0'] = api
- mapper['/'] = openstack.FaultWrapper(openstack.Versions())
+ api10 = openstack.FaultWrapper(auth.AuthMiddleware(
+ limits.RateLimitingMiddleware(inner_app10)))
+ api11 = openstack.FaultWrapper(auth.AuthMiddleware(
+ limits.RateLimitingMiddleware(inner_app11)))
+ mapper['/v1.0'] = api10
+ mapper['/v1.1'] = api11
+ mapper['/'] = openstack.FaultWrapper(versions.Versions())
return mapper
-def stub_out_key_pair_funcs(stubs):
+def stub_out_key_pair_funcs(stubs, have_key_pair=True):
def key_pair(context, user_id):
return [dict(name='key', public_key='public_key')]
- stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+
+ def no_key_pair(context, user_id):
+ return []
+
+ if have_key_pair:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+ else:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_image_service(stubs):
@@ -109,13 +122,13 @@ def stub_out_auth(stubs):
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
- super(ratelimiting.RateLimitingMiddleware, self).__init__(app)
+ super(limits.RateLimitingMiddleware, self).__init__(app)
self.application = app
- stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
+ stubs.Set(nova.api.openstack.limits.RateLimitingMiddleware,
'__init__', fake_rate_init)
- stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
+ stubs.Set(nova.api.openstack.limits.RateLimitingMiddleware,
'__call__', fake_wsgi)
@@ -131,6 +144,21 @@ def stub_out_compute_api_snapshot(stubs):
stubs.Set(nova.compute.API, 'snapshot', snapshot)
+def stub_out_glance_add_image(stubs, sent_to_glance):
+ """
+ Capture the metadata sent to glance by recording it in the sent_to_glance
+ dict, which is modified in place.
+ """
+ orig_add_image = glance_client.Client.add_image
+
+ def fake_add_image(context, metadata, data=None):
+ sent_to_glance['metadata'] = metadata
+ sent_to_glance['data'] = data
+ return orig_add_image(metadata, data)
+
+ stubs.Set(glance_client.Client, 'add_image', fake_add_image)
+
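+# Typical use from a test (illustrative; names here are hypothetical):
+#
+#     sent_to_glance = {}
+#     stub_out_glance_add_image(self.stubs, sent_to_glance)
+#     # ... exercise code that adds an image via the glance client ...
+#     self.assertEqual(expected_metadata, sent_to_glance['metadata'])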
+
def stub_out_glance(stubs, initial_fixtures=None):
class FakeGlanceClient:
@@ -143,36 +171,46 @@ def stub_out_glance(stubs, initial_fixtures=None):
for f in self.fixtures]
def fake_get_images_detailed(self):
- return self.fixtures
+ return copy.deepcopy(self.fixtures)
def fake_get_image_meta(self, image_id):
- for f in self.fixtures:
- if f['id'] == image_id:
- return f
- return None
-
- def fake_add_image(self, image_meta):
- id = ''.join(random.choice(string.letters) for _ in range(20))
- image_meta['id'] = id
+ image = self._find_image(image_id)
+ if image:
+ return copy.deepcopy(image)
+ raise glance_exc.NotFound
+
+ def fake_add_image(self, image_meta, data=None):
+ image_meta = copy.deepcopy(image_meta)
+ image_id = ''.join(random.choice(string.letters)
+ for _ in range(20))
+ image_meta['id'] = image_id
self.fixtures.append(image_meta)
- return id
+ return copy.deepcopy(image_meta)
+
+ def fake_update_image(self, image_id, image_meta, data=None):
+ for attr in ('created_at', 'updated_at', 'deleted_at', 'deleted'):
+ if attr in image_meta:
+ del image_meta[attr]
- def fake_update_image(self, image_id, image_meta):
- f = self.fake_get_image_meta(image_id)
+ f = self._find_image(image_id)
if not f:
- raise exc.NotFound
+ raise glance_exc.NotFound
f.update(image_meta)
+ return copy.deepcopy(f)
def fake_delete_image(self, image_id):
- f = self.fake_get_image_meta(image_id)
+ f = self._find_image(image_id)
if not f:
- raise exc.NotFound
+ raise glance_exc.NotFound
self.fixtures.remove(f)
- ##def fake_delete_all(self):
- ## self.fixtures = []
+ def _find_image(self, image_id):
+ for f in self.fixtures:
+ if f['id'] == image_id:
+ return f
+ return None
GlanceClient = glance_client.Client
fake = FakeGlanceClient(initial_fixtures)
@@ -184,11 +222,15 @@ def stub_out_glance(stubs, initial_fixtures=None):
stubs.Set(GlanceClient, 'add_image', fake.fake_add_image)
stubs.Set(GlanceClient, 'update_image', fake.fake_update_image)
stubs.Set(GlanceClient, 'delete_image', fake.fake_delete_image)
- #stubs.Set(GlanceClient, 'delete_all', fake.fake_delete_all)
class FakeToken(object):
+ # FIXME(sirp): let's not use id here
+ id = 0
+
def __init__(self, **kwargs):
+ FakeToken.id += 1
+ self.id = FakeToken.id
for k, v in kwargs.iteritems():
setattr(self, k, v)
@@ -203,38 +245,121 @@ class FakeAuthDatabase(object):
data = {}
@staticmethod
- def auth_get_token(context, token_hash):
+ def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
- def auth_create_token(context, token):
+ def auth_token_create(context, token):
fake_token = FakeToken(created_at=datetime.datetime.now(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
+ FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
- def auth_destroy_token(context, token):
- if token.token_hash in FakeAuthDatabase.data:
- del FakeAuthDatabase.data['token_hash']
+ def auth_token_destroy(context, token_id):
+ token = FakeAuthDatabase.data.get('id_%i' % token_id)
+ if token and token.token_hash in FakeAuthDatabase.data:
+ del FakeAuthDatabase.data[token.token_hash]
+ del FakeAuthDatabase.data['id_%i' % token_id]
class FakeAuthManager(object):
- auth_data = {}
-
- def add_user(self, key, user):
- FakeAuthManager.auth_data[key] = user
+ #NOTE(justinsb): Accessing static variables through instances is FUBAR
+ #NOTE(justinsb): This should also be private!
+ auth_data = []
+ projects = {}
+
+ @classmethod
+ def clear_fakes(cls):
+ cls.auth_data = []
+ cls.projects = {}
+
+ @classmethod
+ def reset_fake_data(cls):
+ u1 = User('id1', 'guy1', 'acc1', 'secret1', False)
+ cls.auth_data = [u1]
+ cls.projects = dict(testacct=Project('testacct',
+ 'testacct',
+ 'id1',
+ 'test',
+ []))
+
+ def add_user(self, user):
+ FakeAuthManager.auth_data.append(user)
+
+ def get_users(self):
+ return FakeAuthManager.auth_data
def get_user(self, uid):
- for k, v in FakeAuthManager.auth_data.iteritems():
- if v.id == uid:
- return v
+ for user in FakeAuthManager.auth_data:
+ if user.id == uid:
+ return user
return None
- def get_project(self, pid):
+ def get_user_from_access_key(self, key):
+ for user in FakeAuthManager.auth_data:
+ if user.access == key:
+ return user
return None
- def get_user_from_access_key(self, key):
- return FakeAuthManager.auth_data.get(key, None)
+ def delete_user(self, uid):
+ for user in FakeAuthManager.auth_data:
+ if user.id == uid:
+ FakeAuthManager.auth_data.remove(user)
+ return None
+
+ def create_user(self, name, access=None, secret=None, admin=False):
+ u = User(name, name, access, secret, admin)
+ FakeAuthManager.auth_data.append(u)
+ return u
+
+ def modify_user(self, user_id, access=None, secret=None, admin=None):
+ user = self.get_user(user_id)
+ if user:
+ user.access = access
+ user.secret = secret
+ if admin is not None:
+ user.admin = admin
+
+ def is_admin(self, user):
+ return user.admin
+
+ def is_project_member(self, user, project):
+ return ((user.id in project.member_ids) or
+ (user.id == project.project_manager_id))
+
+ def create_project(self, name, manager_user, description=None,
+ member_users=None):
+ member_ids = [User.safe_id(m) for m in member_users] \
+ if member_users else []
+ p = Project(name, name, User.safe_id(manager_user),
+ description, member_ids)
+ FakeAuthManager.projects[name] = p
+ return p
+
+ def delete_project(self, pid):
+ if pid in FakeAuthManager.projects:
+ del FakeAuthManager.projects[pid]
+
+ def modify_project(self, project, manager_user=None, description=None):
+ p = FakeAuthManager.projects.get(project)
+ p.project_manager_id = User.safe_id(manager_user)
+ p.description = description
+
+ def get_project(self, pid):
+ p = FakeAuthManager.projects.get(pid)
+ if p:
+ return p
+ else:
+ raise exc.NotFound
+
+ def get_projects(self, user=None):
+ if not user:
+ return FakeAuthManager.projects.values()
+ else:
+ return [p for p in FakeAuthManager.projects.values()
+ if (user.id in p.member_ids) or
+ (user.id == p.project_manager_id)]
class FakeRateLimiter(object):
diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py
new file mode 100644
index 000000000..64abcf48c
--- /dev/null
+++ b/nova/tests/api/openstack/test_accounts.py
@@ -0,0 +1,123 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import json
+
+import stubout
+import webob
+
+from nova import flags
+from nova import test
+from nova.api.openstack import accounts
+from nova.auth.manager import User
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def fake_init(self):
+ self.manager = fakes.FakeAuthManager()
+
+
+def fake_admin_check(self, req):
+ return True
+
+
+class AccountsTest(test.TestCase):
+ def setUp(self):
+ super(AccountsTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(accounts.Controller, '__init__',
+ fake_init)
+ self.stubs.Set(accounts.Controller, '_check_admin',
+ fake_admin_check)
+ fakes.FakeAuthManager.clear_fakes()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+ fakemgr = fakes.FakeAuthManager()
+ joeuser = User('id1', 'guy1', 'acc1', 'secret1', False)
+ superuser = User('id2', 'guy2', 'acc2', 'secret2', True)
+ fakemgr.add_user(joeuser)
+ fakemgr.add_user(superuser)
+ fakemgr.create_project('test1', joeuser)
+ fakemgr.create_project('test2', superuser)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(AccountsTest, self).tearDown()
+
+ def test_get_account(self):
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['account']['id'], 'test1')
+ self.assertEqual(res_dict['account']['name'], 'test1')
+ self.assertEqual(res_dict['account']['manager'], 'id1')
+ self.assertEqual(res.status_int, 200)
+
+ def test_account_delete(self):
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertTrue('test1' not in fakes.FakeAuthManager.projects)
+ self.assertEqual(res.status_int, 200)
+
+ def test_account_create(self):
+ body = dict(account=dict(description='test account',
+ manager='id1'))
+ req = webob.Request.blank('/v1.0/accounts/newacct')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['account']['id'], 'newacct')
+ self.assertEqual(res_dict['account']['name'], 'newacct')
+ self.assertEqual(res_dict['account']['description'], 'test account')
+ self.assertEqual(res_dict['account']['manager'], 'id1')
+ self.assertTrue('newacct' in
+ fakes.FakeAuthManager.projects)
+ self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 3)
+
+ def test_account_update(self):
+ body = dict(account=dict(description='test account',
+ manager='id2'))
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['account']['id'], 'test1')
+ self.assertEqual(res_dict['account']['name'], 'test1')
+ self.assertEqual(res_dict['account']['description'], 'test account')
+ self.assertEqual(res_dict['account']['manager'], 'id2')
+ self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 2)
diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py
index dfce1b127..e87255b18 100644
--- a/nova/tests/api/openstack/test_adminapi.py
+++ b/nova/tests/api/openstack/test_adminapi.py
@@ -23,7 +23,6 @@ from paste import urlmap
from nova import flags
from nova import test
from nova.api import openstack
-from nova.api.openstack import ratelimiting
from nova.api.openstack import auth
from nova.tests.api.openstack import fakes
@@ -35,7 +34,7 @@ class AdminAPITest(test.TestCase):
def setUp(self):
super(AdminAPITest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index 13f6c3a1c..8f189c744 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -26,6 +26,7 @@ import nova.api.openstack.auth
import nova.auth.manager
from nova import auth
from nova import context
+from nova import db
from nova import test
from nova.tests.api.openstack import fakes
@@ -38,7 +39,7 @@ class Test(test.TestCase):
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.clear_fakes()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_networking(self.stubs)
@@ -50,11 +51,12 @@ class Test(test.TestCase):
def test_authorize_user(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
@@ -64,11 +66,13 @@ class Test(test.TestCase):
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
+ f.create_project('user1_project', user)
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
@@ -79,8 +83,7 @@ class Test(test.TestCase):
self.assertEqual(result.headers['X-Storage-Url'], "")
token = result.headers['X-Auth-Token']
- self.stubs.Set(nova.api.openstack, 'APIRouter',
- fakes.FakeRouter)
+ self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
result = req.get_response(fakes.wsgi_app())
@@ -89,7 +92,7 @@ class Test(test.TestCase):
def test_token_expiry(self):
self.destroy_called = False
- token_hash = 'bacon'
+ token_hash = 'token_hash'
def destroy_token_mock(meh, context, token):
self.destroy_called = True
@@ -99,22 +102,33 @@ class Test(test.TestCase):
token_hash=token_hash,
created_at=datetime.datetime(1990, 1, 1))
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_destroy',
destroy_token_mock)
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_get_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_get',
bad_token)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'bacon'
+ req.headers['X-Auth-Token'] = 'token_hash'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
self.assertEqual(self.destroy_called, True)
- def test_bad_user(self):
+ def test_bad_user_bad_key(self):
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'unknown_user'
+ req.headers['X-Auth-Key'] = 'unknown_user_key'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_bad_user_good_key(self):
+ f = fakes.FakeAuthManager()
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
+
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-User'] = 'unknown_user'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
@@ -125,7 +139,34 @@ class Test(test.TestCase):
def test_bad_token(self):
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'baconbaconbacon'
+ req.headers['X-Auth-Token'] = 'unknown_token'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+
+class TestFunctional(test.TestCase):
+ def test_token_expiry(self):
+ ctx = context.get_admin_context()
+ tok = db.auth_token_create(ctx, dict(
+ token_hash='test_token_hash',
+ cdn_management_url='',
+ server_management_url='',
+ storage_url='',
+ user_id='user1',
+ ))
+
+ db.auth_token_update(ctx, tok.token_hash, dict(
+ created_at=datetime.datetime(2000, 1, 1, 12, 0, 0),
+ ))
+
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'test_token_hash'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_token_doesnotexist(self):
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'nonexistent_token_hash'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
@@ -137,7 +178,7 @@ class TestLimiter(test.TestCase):
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.clear_fakes()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
@@ -148,17 +189,18 @@ class TestLimiter(test.TestCase):
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
+ f.create_project('test', user)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
token = result.headers['X-Auth-Token']
- self.stubs.Set(nova.api.openstack, 'APIRouter',
- fakes.FakeRouter)
+ self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
req = webob.Request.blank('/v1.0/fake')
req.method = 'POST'
req.headers['X-Auth-Token'] = token
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 59d850157..8f57c5b67 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -19,6 +19,7 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
+import webob.exc
from webob import Request
@@ -78,20 +79,14 @@ class LimiterTest(test.TestCase):
Test offset key works with a blank offset.
"""
req = Request.blank('/?offset=')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""
Test offset key works with a BAD offset.
"""
req = Request.blank(u'/?offset=\u0020aa')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_nothing(self):
"""
@@ -160,3 +155,17 @@ class LimiterTest(test.TestCase):
self.assertEqual(limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(limited(items, req, max_limit=2000), [])
+
+ def test_limiter_negative_limit(self):
+ """
+ Test a negative limit.
+ """
+ req = Request.blank('/?limit=-3000')
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+
+ def test_limiter_negative_offset(self):
+ """
+ Test a negative offset.
+ """
+ req = Request.blank('/?offset=-30')
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
new file mode 100644
index 000000000..481d34ed1
--- /dev/null
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -0,0 +1,236 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+import os.path
+
+from nova import context
+from nova import flags
+from nova.api import openstack
+from nova.api.openstack import extensions
+from nova.api.openstack import flavors
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+FLAGS = flags.FLAGS
+
+response_body = "Try to say this Mr. Knox, sir..."
+
+
+class StubController(nova.wsgi.Controller):
+
+ def __init__(self, body):
+ self.body = body
+
+ def index(self, req):
+ return self.body
+
+
+class StubExtensionManager(object):
+
+ def __init__(self, resource_ext=None, action_ext=None, response_ext=None):
+ self.resource_ext = resource_ext
+ self.action_ext = action_ext
+ self.response_ext = response_ext
+
+ def get_name(self):
+ return "Tweedle Beetle Extension"
+
+ def get_alias(self):
+ return "TWDLBETL"
+
+ def get_description(self):
+ return "Provides access to Tweedle Beetles"
+
+ def get_resources(self):
+ resource_exts = []
+ if self.resource_ext:
+ resource_exts.append(self.resource_ext)
+ return resource_exts
+
+ def get_actions(self):
+ action_exts = []
+ if self.action_ext:
+ action_exts.append(self.action_ext)
+ return action_exts
+
+ def get_response_extensions(self):
+ response_exts = []
+ if self.response_ext:
+ response_exts.append(self.response_ext)
+ return response_exts
+
+
+class ExtensionControllerTest(unittest.TestCase):
+
+ def test_index(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/extensions")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+
+ def test_get_by_alias(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/extensions/FOXNSOX")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+
+
+class ResourceExtensionTest(unittest.TestCase):
+
+ def test_no_extension_present(self):
+ manager = StubExtensionManager(None)
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/blah")
+ response = request.get_response(ext_midware)
+ self.assertEqual(404, response.status_int)
+
+ def test_get_resources(self):
+ res_ext = extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/tweedles")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_get_resources_with_controller(self):
+ res_ext = extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/tweedles")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+
+class ExtensionManagerTest(unittest.TestCase):
+
+ response_body = "Try to say this Mr. Knox, sir..."
+
+ def setUp(self):
+ FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
+ "extensions")
+
+ def test_get_resources(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/foxnsocks")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+
+class ActionExtensionTest(unittest.TestCase):
+
+ def setUp(self):
+ FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
+ "extensions")
+
+ def _send_server_action_request(self, url, body):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank(url)
+ request.method = 'POST'
+ request.content_type = 'application/json'
+ request.body = json.dumps(body)
+ response = request.get_response(ext_midware)
+ return response
+
+ def test_extended_action(self):
+ body = dict(add_tweedle=dict(name="test"))
+ response = self._send_server_action_request("/servers/1/action", body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Added.", response.body)
+
+ body = dict(delete_tweedle=dict(name="test"))
+ response = self._send_server_action_request("/servers/1/action", body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Deleted.", response.body)
+
+ def test_invalid_action_body(self):
+ body = dict(blah=dict(name="test")) # Doesn't exist
+ response = self._send_server_action_request("/servers/1/action", body)
+ self.assertEqual(501, response.status_int)
+
+ def test_invalid_action(self):
+ body = dict(blah=dict(name="test"))
+ response = self._send_server_action_request("/asdf/1/action", body)
+ self.assertEqual(404, response.status_int)
+
+
+class ResponseExtensionTest(unittest.TestCase):
+
+ def setUp(self):
+ super(ResponseExtensionTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(ResponseExtensionTest, self).tearDown()
+
+ def test_get_resources_with_stub_mgr(self):
+
+ test_resp = "Gooey goo for chewy chewing!"
+
+ def _resp_handler(res):
+ # only handle JSON responses
+ data = json.loads(res.body)
+ data['flavor']['googoose'] = test_resp
+ return data
+
+ resp_ext = extensions.ResponseExtension('GET',
+ '/v1.1/flavors/:(id)',
+ _resp_handler)
+
+ manager = StubExtensionManager(None, None, resp_ext)
+ app = fakes.wsgi_app()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/v1.1/flavors/1")
+ request.environ['api.version'] = '1.1'
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ response_data = json.loads(response.body)
+ self.assertEqual(test_resp, response_data['flavor']['googoose'])
+
+ def test_get_resources_with_mgr(self):
+
+ test_resp = "Gooey goo for chewy chewing!"
+
+ app = fakes.wsgi_app()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/v1.1/flavors/1")
+ request.environ['api.version'] = '1.1'
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ response_data = json.loads(response.body)
+ self.assertEqual(test_resp, response_data['flavor']['googoose'])
+ self.assertEqual("Pig Bands!", response_data['big_bands'])
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index 7667753f4..9746e8168 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
+
import webob
import webob.dec
import webob.exc
@@ -24,35 +26,115 @@ from nova.api.openstack import faults
class TestFaults(test.TestCase):
+ """Tests covering `nova.api.openstack.faults:Fault` class."""
- def test_fault_parts(self):
- req = webob.Request.blank('/.xml')
- f = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
- resp = req.get_response(f)
+ def _prepare_xml(self, xml_string):
+ """Remove characters from string which hinder XML equality testing."""
+ xml_string = xml_string.replace(" ", "")
+ xml_string = xml_string.replace("\n", "")
+ xml_string = xml_string.replace("\t", "")
+ return xml_string
- first_two_words = resp.body.strip().split()[:2]
- self.assertEqual(first_two_words, ['<badRequest', 'code="400">'])
- body_without_spaces = ''.join(resp.body.split())
- self.assertTrue('<message>scram</message>' in body_without_spaces)
+ def test_400_fault_xml(self):
+ """Test fault serialized to XML via file-extension and/or header."""
+ requests = [
+ webob.Request.blank('/.xml'),
+ webob.Request.blank('/', headers={"Accept": "application/xml"}),
+ ]
- def test_retry_header(self):
- req = webob.Request.blank('/.xml')
- exc = webob.exc.HTTPRequestEntityTooLarge(explanation='sorry',
- headers={'Retry-After': 4})
- f = faults.Fault(exc)
- resp = req.get_response(f)
- first_two_words = resp.body.strip().split()[:2]
- self.assertEqual(first_two_words, ['<overLimit', 'code="413">'])
- body_sans_spaces = ''.join(resp.body.split())
- self.assertTrue('<message>sorry</message>' in body_sans_spaces)
- self.assertTrue('<retryAfter>4</retryAfter>' in body_sans_spaces)
- self.assertEqual(resp.headers['Retry-After'], 4)
+ for request in requests:
+ fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+ response = request.get_response(fault)
+
+ expected = self._prepare_xml("""
+ <badRequest code="400">
+ <message>scram</message>
+ </badRequest>
+ """)
+ actual = self._prepare_xml(response.body)
+
+ self.assertEqual(response.content_type, "application/xml")
+ self.assertEqual(expected, actual)
+
+ def test_400_fault_json(self):
+ """Test fault serialized to JSON via file-extension and/or header."""
+ requests = [
+ webob.Request.blank('/.json'),
+ webob.Request.blank('/', headers={"Accept": "application/json"}),
+ ]
+
+ for request in requests:
+ fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+ response = request.get_response(fault)
+
+ expected = {
+ "badRequest": {
+ "message": "scram",
+ "code": 400,
+ },
+ }
+ actual = json.loads(response.body)
+
+ self.assertEqual(response.content_type, "application/json")
+ self.assertEqual(expected, actual)
+
+ def test_413_fault_xml(self):
+ requests = [
+ webob.Request.blank('/.xml'),
+ webob.Request.blank('/', headers={"Accept": "application/xml"}),
+ ]
+
+ for request in requests:
+ exc = webob.exc.HTTPRequestEntityTooLarge
+ fault = faults.Fault(exc(explanation='sorry',
+ headers={'Retry-After': 4}))
+ response = request.get_response(fault)
+
+ expected = self._prepare_xml("""
+ <overLimit code="413">
+ <message>sorry</message>
+ <retryAfter>4</retryAfter>
+ </overLimit>
+ """)
+ actual = self._prepare_xml(response.body)
+
+ self.assertEqual(expected, actual)
+ self.assertEqual(response.content_type, "application/xml")
+ self.assertEqual(response.headers['Retry-After'], 4)
+
+ def test_413_fault_json(self):
+ """Test fault serialized to JSON via file-extension and/or header."""
+ requests = [
+ webob.Request.blank('/.json'),
+ webob.Request.blank('/', headers={"Accept": "application/json"}),
+ ]
+
+ for request in requests:
+ exc = webob.exc.HTTPRequestEntityTooLarge
+ fault = faults.Fault(exc(explanation='sorry',
+ headers={'Retry-After': 4}))
+ response = request.get_response(fault)
+
+ expected = {
+ "overLimit": {
+ "message": "sorry",
+ "code": 413,
+ "retryAfter": 4,
+ },
+ }
+ actual = json.loads(response.body)
+
+ self.assertEqual(response.content_type, "application/json")
+ self.assertEqual(expected, actual)
def test_raise(self):
+ """Ensure the ability to raise `Fault`s in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise faults.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
+
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
+ self.assertEqual(resp.content_type, "application/xml")
self.assertEqual(resp.status_int, 404)
self.assertTrue('whut?' in resp.body)
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 761265965..954d72adf 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -15,32 +15,249 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
import stubout
import webob
+import nova.db.api
+from nova import context
+from nova import exception
from nova import test
-import nova.api
-from nova.api.openstack import flavors
from nova.tests.api.openstack import fakes
+def stub_flavor(flavorid, name, memory_mb="256", local_gb="10"):
+ return {
+ "flavorid": str(flavorid),
+ "name": name,
+ "memory_mb": memory_mb,
+ "local_gb": local_gb,
+ }
+
+
+def return_instance_type_by_flavor_id(context, flavorid):
+ return stub_flavor(flavorid, "flavor %s" % (flavorid,))
+
+
+def return_instance_types(context, num=2):
+ instance_types = {}
+ for i in xrange(1, num + 1):
+ name = "flavor %s" % (i,)
+ instance_types[name] = stub_flavor(i, name)
+ return instance_types
+
+
+def return_instance_type_not_found(context, flavorid):
+ raise exception.NotFound()
+
+
class FlavorsTest(test.TestCase):
def setUp(self):
super(FlavorsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(nova.db.api, "instance_type_get_all",
+ return_instance_types)
+ self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id",
+ return_instance_type_by_flavor_id)
+ self.context = context.get_admin_context()
def tearDown(self):
self.stubs.UnsetAll()
super(FlavorsTest, self).tearDown()
- def test_get_flavor_list(self):
+ def test_get_flavor_list_v1_0(self):
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ },
+ ]
+ self.assertEqual(flavors, expected)
+
+ def test_get_flavor_list_detail_v1_0(self):
+ req = webob.Request.blank('/v1.0/flavors/detail')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "256",
+ "disk": "10",
+ },
+ ]
+ self.assertEqual(flavors, expected)
- def test_get_flavor_by_id(self):
- pass
+ def test_get_flavor_by_id_v1_0(self):
+ req = webob.Request.blank('/v1.0/flavors/12')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavor"]
+ expected = {
+ "id": "12",
+ "name": "flavor 12",
+ "ram": "256",
+ "disk": "10",
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_by_invalid_id(self):
+ self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id",
+ return_instance_type_not_found)
+ req = webob.Request.blank('/v1.0/flavors/asdf')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_flavor_by_id_v1_1(self):
+ req = webob.Request.blank('/v1.1/flavors/12')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavor"]
+ expected = {
+ "id": "12",
+ "name": "flavor 12",
+ "ram": "256",
+ "disk": "10",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/12",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/12",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/12",
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_v1_1(self):
+ req = webob.Request.blank('/v1.1/flavors')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ ],
+ },
+ ]
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_detail_v1_1(self):
+ req = webob.Request.blank('/v1.1/flavors/detail')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "256",
+ "disk": "10",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ ],
+ },
+ ]
+ self.assertEqual(flavor, expected)
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
new file mode 100644
index 000000000..9be753f84
--- /dev/null
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -0,0 +1,166 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+
+from nova import flags
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+
+FLAGS = flags.FLAGS
+
+
+class ImageMetaDataTest(unittest.TestCase):
+
+ IMAGE_FIXTURES = [
+ {'status': 'active',
+ 'name': 'image1',
+ 'deleted': False,
+ 'container_format': None,
+ 'created_at': '2011-03-22T17:40:15',
+ 'disk_format': None,
+ 'updated_at': '2011-03-22T17:40:15',
+ 'id': '1',
+ 'location': 'file:///var/lib/glance/images/1',
+ 'is_public': True,
+ 'deleted_at': None,
+ 'properties': {
+ 'type': 'ramdisk',
+ 'key1': 'value1',
+ 'key2': 'value2'
+ },
+ 'size': 5882349},
+ {'status': 'active',
+ 'name': 'image2',
+ 'deleted': False,
+ 'container_format': None,
+ 'created_at': '2011-03-22T17:40:15',
+ 'disk_format': None,
+ 'updated_at': '2011-03-22T17:40:15',
+ 'id': '2',
+ 'location': 'file:///var/lib/glance/images/2',
+ 'is_public': True,
+ 'deleted_at': None,
+ 'properties': {
+ 'type': 'ramdisk',
+ 'key1': 'value1',
+ 'key2': 'value2'
+ },
+ 'size': 5882349},
+ ]
+
+ def setUp(self):
+ super(ImageMetaDataTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.orig_image_service = FLAGS.image_service
+ FLAGS.image_service = 'nova.image.glance.GlanceImageService'
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_glance(self.stubs, self.IMAGE_FIXTURES)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.image_service = self.orig_image_service
+ super(ImageMetaDataTest, self).tearDown()
+
+ def test_index(self):
+ req = webob.Request.blank('/v1.1/images/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+
+ def test_show(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_show_not_found(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key9')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(404, res.status_int)
+
+ def test_create(self):
+ req = webob.Request.blank('/v1.1/images/2/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = '{"metadata": {"key9": "value9"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value9', res_dict['metadata']['key9'])
+ # other items should not be modified
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+ self.assertEqual('value2', res_dict['metadata']['key2'])
+ self.assertEqual(1, len(res_dict))
+
+ def test_update_item(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "zz"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('zz', res_dict['key1'])
+
+ def test_update_item_too_many_keys(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/bad')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_delete(self):
+ req = webob.Request.blank('/v1.1/images/2/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_delete_not_found(self):
+ req = webob.Request.blank('/v1.1/images/2/meta/blah')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index e232bc3d5..69cc3116d 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -20,12 +20,18 @@ Tests of the new image services, both as a service layer,
and as a WSGI layer
"""
+import copy
import json
import datetime
+import os
+import shutil
+import tempfile
+import xml.dom.minidom as minidom
import stubout
import webob
+from glance import client as glance_client
from nova import context
from nova import exception
from nova import flags
@@ -39,86 +45,57 @@ from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
-class BaseImageServiceTests(object):
-
+class _BaseImageServiceTests(test.TestCase):
"""Tasks to test for all image services"""
- def test_create(self):
-
- fixture = {'name': 'test image',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}
+ def __init__(self, *args, **kwargs):
+ super(_BaseImageServiceTests, self).__init__(*args, **kwargs)
+ self.service = None
+ self.context = None
+ def test_create(self):
+ fixture = self._make_fixture('test image')
num_images = len(self.service.index(self.context))
- id = self.service.create(self.context, fixture)
+ image_id = self.service.create(self.context, fixture)['id']
- self.assertNotEquals(None, id)
+ self.assertNotEquals(None, image_id)
self.assertEquals(num_images + 1,
len(self.service.index(self.context)))
def test_create_and_show_non_existing_image(self):
-
- fixture = {'name': 'test image',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}
-
+ fixture = self._make_fixture('test image')
num_images = len(self.service.index(self.context))
- id = self.service.create(self.context, fixture)
-
- self.assertNotEquals(None, id)
+ image_id = self.service.create(self.context, fixture)['id']
+ self.assertNotEquals(None, image_id)
self.assertRaises(exception.NotFound,
self.service.show,
self.context,
'bad image id')
def test_update(self):
-
- fixture = {'name': 'test image',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}
-
- id = self.service.create(self.context, fixture)
-
+ fixture = self._make_fixture('test image')
+ image_id = self.service.create(self.context, fixture)['id']
fixture['status'] = 'in progress'
- self.service.update(self.context, id, fixture)
- new_image_data = self.service.show(self.context, id)
+ self.service.update(self.context, image_id, fixture)
+
+ new_image_data = self.service.show(self.context, image_id)
self.assertEquals('in progress', new_image_data['status'])
def test_delete(self):
-
- fixtures = [
- {'name': 'test image 1',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None},
- {'name': 'test image 2',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}]
+ fixture1 = self._make_fixture('test image 1')
+ fixture2 = self._make_fixture('test image 2')
+ fixtures = [fixture1, fixture2]
num_images = len(self.service.index(self.context))
self.assertEquals(0, num_images, str(self.service.index(self.context)))
ids = []
for fixture in fixtures:
- new_id = self.service.create(self.context, fixture)
+ new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.index(self.context))
@@ -129,31 +106,66 @@ class BaseImageServiceTests(object):
num_images = len(self.service.index(self.context))
self.assertEquals(1, num_images)
+ def test_index(self):
+ fixture = self._make_fixture('test image')
+ image_id = self.service.create(self.context, fixture)['id']
+ image_metas = self.service.index(self.context)
+ expected = [{'id': 'DONTCARE', 'name': 'test image'}]
+ self.assertDictListMatch(image_metas, expected)
-class LocalImageServiceTest(test.TestCase,
- BaseImageServiceTests):
+ @staticmethod
+ def _make_fixture(name):
+        fixture = {'name': name,
+ 'updated': None,
+ 'created': None,
+ 'status': None,
+ 'is_public': True}
+ return fixture
+
+
+class LocalImageServiceTest(_BaseImageServiceTests):
"""Tests the local image service"""
def setUp(self):
super(LocalImageServiceTest, self).setUp()
+ self.tempdir = tempfile.mkdtemp()
+ self.flags(images_path=self.tempdir)
self.stubs = stubout.StubOutForTesting()
service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(None, None)
def tearDown(self):
- self.service.delete_all()
- self.service.delete_imagedir()
+ shutil.rmtree(self.tempdir)
self.stubs.UnsetAll()
super(LocalImageServiceTest, self).tearDown()
+ def test_get_all_ids_with_incorrect_directory_formats(self):
+        # create some old-style image entries (file names starting with 'ami-')
+ for x in [1, 2, 3]:
+ tempfile.mkstemp(prefix='ami-', dir=self.tempdir)
+        # create some valid image directory names
+ for x in ["1485baed", "1a60f0ee", "3123a73d"]:
+ os.makedirs(os.path.join(self.tempdir, x))
+ found_image_ids = self.service._ids()
+ self.assertEqual(True, isinstance(found_image_ids, list))
+ self.assertEqual(3, len(found_image_ids), len(found_image_ids))
-class GlanceImageServiceTest(test.TestCase,
- BaseImageServiceTests):
- """Tests the local image service"""
+class GlanceImageServiceTest(_BaseImageServiceTests):
+
+ """Tests the Glance image service, in particular that metadata translation
+ works properly.
+ At a high level, the translations involved are:
+
+ 1. Glance -> ImageService - This is needed so we can support
+       multiple ImageServices (Glance, Local, etc)
+
+    2. ImageService -> API - This is needed so we can support multiple
+ APIs (OpenStack, EC2)
+ """
def setUp(self):
super(GlanceImageServiceTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
@@ -161,91 +173,578 @@ class GlanceImageServiceTest(test.TestCase,
fakes.stub_out_compute_api_snapshot(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
- self.context = context.RequestContext(None, None)
+ self.context = context.RequestContext(1, None)
self.service.delete_all()
+ self.sent_to_glance = {}
+ fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
def tearDown(self):
self.stubs.UnsetAll()
super(GlanceImageServiceTest, self).tearDown()
+ def test_create_with_instance_id(self):
+ """Ensure instance_id is persisted as an image-property"""
+ fixture = {'name': 'test image',
+ 'is_public': False,
+ 'properties': {'instance_id': '42', 'user_id': '1'}}
-class ImageControllerWithGlanceServiceTest(test.TestCase):
+ image_id = self.service.create(self.context, fixture)['id']
+ expected = fixture
+ self.assertDictMatch(self.sent_to_glance['metadata'], expected)
+
+ image_meta = self.service.show(self.context, image_id)
+ expected = {'id': image_id,
+ 'name': 'test image',
+ 'is_public': False,
+ 'properties': {'instance_id': '42', 'user_id': '1'}}
+ self.assertDictMatch(image_meta, expected)
- """Test of the OpenStack API /images application controller"""
-
- # Registered images at start of each test.
-
- IMAGE_FIXTURES = [
- {'id': '23g2ogk23k4hhkk4k42l',
- 'imageId': '23g2ogk23k4hhkk4k42l',
- 'name': 'public image #1',
- 'created_at': str(datetime.datetime.utcnow()),
- 'updated_at': str(datetime.datetime.utcnow()),
- 'deleted_at': None,
- 'deleted': False,
- 'is_public': True,
- 'status': 'available',
- 'image_type': 'kernel'},
- {'id': 'slkduhfas73kkaskgdas',
- 'imageId': 'slkduhfas73kkaskgdas',
- 'name': 'public image #2',
- 'created_at': str(datetime.datetime.utcnow()),
- 'updated_at': str(datetime.datetime.utcnow()),
- 'deleted_at': None,
- 'deleted': False,
- 'is_public': True,
- 'status': 'available',
- 'image_type': 'ramdisk'}]
+ image_metas = self.service.detail(self.context)
+ self.assertDictMatch(image_metas[0], expected)
+
+ def test_create_without_instance_id(self):
+ """
+ Ensure we can create an image without having to specify an
+ instance_id. Public images are an example of an image not tied to an
+ instance.
+ """
+ fixture = {'name': 'test image'}
+ image_id = self.service.create(self.context, fixture)['id']
+
+ expected = {'name': 'test image', 'properties': {}}
+ self.assertDictMatch(self.sent_to_glance['metadata'], expected)
+
+
+class ImageControllerWithGlanceServiceTest(test.TestCase):
+ """
+ Test of the OpenStack API /images application controller w/Glance.
+ """
+ NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
+ NOW_API_FORMAT = "2010-10-11T10:30:22Z"
def setUp(self):
+ """Run before each test."""
super(ImageControllerWithGlanceServiceTest, self).setUp()
self.orig_image_service = FLAGS.image_service
FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
- fakes.stub_out_glance(self.stubs, initial_fixtures=self.IMAGE_FIXTURES)
+ self.fixtures = self._make_image_fixtures()
+ fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
def tearDown(self):
+ """Run after each test."""
self.stubs.UnsetAll()
FLAGS.image_service = self.orig_image_service
super(ImageControllerWithGlanceServiceTest, self).tearDown()
- def test_get_image_index(self):
- req = webob.Request.blank('/v1.0/images')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
-
- fixture_index = [dict(id=f['id'], name=f['name']) for f
- in self.IMAGE_FIXTURES]
+ def _applicable_fixture(self, fixture, user_id):
+ """Determine if this fixture is applicable for given user id."""
+ is_public = fixture["is_public"]
+ try:
+ uid = int(fixture["properties"]["user_id"])
+ except KeyError:
+ uid = None
+ return uid == user_id or is_public
- for image in res_dict['images']:
- self.assertEquals(1, fixture_index.count(image),
- "image %s not in fixture index!" % str(image))
+ def test_get_image_index(self):
+ request = webob.Request.blank('/v1.0/images')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ expected = [{'id': 123, 'name': 'public image'},
+ {'id': 124, 'name': 'queued backup'},
+ {'id': 125, 'name': 'saving backup'},
+ {'id': 126, 'name': 'active backup'},
+ {'id': 127, 'name': 'killed backup'},
+ {'id': 129, 'name': None}]
+
+ self.assertDictListMatch(response_list, expected)
+
+ def test_get_image(self):
+ request = webob.Request.blank('/v1.0/images/123')
+ response = request.get_response(fakes.wsgi_app())
+
+ self.assertEqual(200, response.status_int)
+
+ actual_image = json.loads(response.body)
+
+ expected_image = {
+ "image": {
+ "id": 123,
+ "name": "public image",
+ "updated": self.NOW_API_FORMAT,
+ "created": self.NOW_API_FORMAT,
+ "status": "ACTIVE",
+ },
+ }
+
+ self.assertEqual(expected_image, actual_image)
+
+ def test_get_image_v1_1(self):
+ request = webob.Request.blank('/v1.1/images/123')
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = json.loads(response.body)
+
+ href = "http://localhost/v1.1/images/123"
+
+ expected_image = {
+ "image": {
+ "id": 123,
+ "name": "public image",
+ "updated": self.NOW_API_FORMAT,
+ "created": self.NOW_API_FORMAT,
+ "status": "ACTIVE",
+ "links": [{
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ }],
+ },
+ }
+
+ self.assertEqual(expected_image, actual_image)
+
+ def test_get_image_xml(self):
+ request = webob.Request.blank('/v1.0/images/123')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = minidom.parseString(response.body.replace(" ", ""))
+
+ expected_now = self.NOW_API_FORMAT
+ expected_image = minidom.parseString("""
+ <image id="123"
+ name="public image"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE" />
+ """ % (locals()))
+
+ self.assertEqual(expected_image.toxml(), actual_image.toxml())
+
+ def test_get_image_xml_no_name(self):
+ request = webob.Request.blank('/v1.0/images/129')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = minidom.parseString(response.body.replace(" ", ""))
+
+ expected_now = self.NOW_API_FORMAT
+ expected_image = minidom.parseString("""
+ <image id="129"
+ name="None"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE" />
+ """ % (locals()))
+
+ self.assertEqual(expected_image.toxml(), actual_image.toxml())
+
+ def test_get_image_v1_1_xml(self):
+ request = webob.Request.blank('/v1.1/images/123')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = minidom.parseString(response.body.replace(" ", ""))
+
+ expected_href = "http://localhost/v1.1/images/123"
+ expected_now = self.NOW_API_FORMAT
+ expected_image = minidom.parseString("""
+ <image id="123"
+ name="public image"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <links>
+ <link href="%(expected_href)s" rel="self"/>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/xml" />
+ </links>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected_image.toxml(), actual_image.toxml())
+
+ def test_get_image_404_json(self):
+ request = webob.Request.blank('/v1.0/images/NonExistantImage')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = {
+ "itemNotFound": {
+ "message": "Image not found.",
+ "code": 404,
+ },
+ }
+
+ actual = json.loads(response.body)
+
+ self.assertEqual(expected, actual)
+
+ def test_get_image_404_xml(self):
+ request = webob.Request.blank('/v1.0/images/NonExistantImage')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = minidom.parseString("""
+ <itemNotFound code="404">
+ <message>
+ Image not found.
+ </message>
+ </itemNotFound>
+ """.replace(" ", ""))
+
+ actual = minidom.parseString(response.body.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_get_image_404_v1_1_json(self):
+ request = webob.Request.blank('/v1.1/images/NonExistantImage')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = {
+ "itemNotFound": {
+ "message": "Image not found.",
+ "code": 404,
+ },
+ }
+
+ actual = json.loads(response.body)
+
+ self.assertEqual(expected, actual)
+
+ def test_get_image_404_v1_1_xml(self):
+ request = webob.Request.blank('/v1.1/images/NonExistantImage')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = minidom.parseString("""
+ <itemNotFound code="404">
+ <message>
+ Image not found.
+ </message>
+ </itemNotFound>
+ """.replace(" ", ""))
+
+ actual = minidom.parseString(response.body.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_get_image_index_v1_1(self):
+ request = webob.Request.blank('/v1.1/images')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ fixtures = copy.copy(self.fixtures)
+
+ for image in fixtures:
+ if not self._applicable_fixture(image, 1):
+ fixtures.remove(image)
+ continue
+
+ href = "http://localhost/v1.1/images/%s" % image["id"]
+ test_image = {
+ "id": image["id"],
+ "name": image["name"],
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/%s" % image["id"],
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ }],
+ }
+ self.assertTrue(test_image in response_list)
+
+ self.assertEqual(len(response_list), len(fixtures))
def test_get_image_details(self):
- req = webob.Request.blank('/v1.0/images/detail')
+ request = webob.Request.blank('/v1.0/images/detail')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ expected = [{
+ 'id': 123,
+ 'name': 'public image',
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ },
+ {
+ 'id': 124,
+ 'name': 'queued backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'QUEUED',
+ },
+ {
+ 'id': 125,
+ 'name': 'saving backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 0,
+ },
+ {
+ 'id': 126,
+ 'name': 'active backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE'
+ },
+ {
+ 'id': 127,
+ 'name': 'killed backup', 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'FAILED',
+ },
+ {
+ 'id': 129,
+ 'name': None,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ }]
+
+ self.assertDictListMatch(expected, response_list)
+
+ def test_get_image_details_v1_1(self):
+ request = webob.Request.blank('/v1.1/images/detail')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ expected = [{
+ 'id': 123,
+ 'name': 'public image',
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/123",
+ }],
+ },
+ {
+ 'id': 124,
+ 'name': 'queued backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'QUEUED',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/124",
+ }],
+ },
+ {
+ 'id': 125,
+ 'name': 'saving backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 0,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/125",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/125",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/125",
+ }],
+ },
+ {
+ 'id': 126,
+ 'name': 'active backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/126",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/126",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/126",
+ }],
+ },
+ {
+ 'id': 127,
+ 'name': 'killed backup', 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'FAILED',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/127",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/127",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/127",
+ }],
+ },
+ {
+ 'id': 129,
+ 'name': None,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/129",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/129",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/129",
+ }],
+ },
+ ]
+
+ self.assertDictListMatch(expected, response_list)
+
+ def test_get_image_found(self):
+ req = webob.Request.blank('/v1.0/images/123')
+ res = req.get_response(fakes.wsgi_app())
+ image_meta = json.loads(res.body)['image']
+ expected = {'id': 123, 'name': 'public image',
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE'}
+ self.assertDictMatch(image_meta, expected)
+
+ def test_get_image_non_existent(self):
+ req = webob.Request.blank('/v1.0/images/4242')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_image_not_owned(self):
+ """We should return a 404 if we request an image that doesn't belong
+ to us
+ """
+ req = webob.Request.blank('/v1.0/images/128')
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
-
- def _is_equivalent_subset(x, y):
- if set(x) <= set(y):
- for k, v in x.iteritems():
- if x[k] != y[k]:
- if x[k] == 'active' and y[k] == 'available':
- continue
- return False
- return True
- return False
-
- for image in res_dict['images']:
- for image_fixture in self.IMAGE_FIXTURES:
- if _is_equivalent_subset(image, image_fixture):
- break
- else:
- self.assertEquals(1, 2, "image %s not in fixtures!" %
- str(image))
+ self.assertEqual(res.status_int, 404)
+
+ @classmethod
+ def _make_image_fixtures(cls):
+ image_id = 123
+ base_attrs = {'created_at': cls.NOW_GLANCE_FORMAT,
+ 'updated_at': cls.NOW_GLANCE_FORMAT,
+ 'deleted_at': None,
+ 'deleted': False}
+
+ fixtures = []
+
+ def add_fixture(**kwargs):
+ kwargs.update(base_attrs)
+ fixtures.append(kwargs)
+
+ # Public image
+ add_fixture(id=image_id, name='public image', is_public=True,
+ status='active', properties={})
+ image_id += 1
+
+ # Backup for User 1
+ backup_properties = {'instance_id': '42', 'user_id': '1'}
+ for status in ('queued', 'saving', 'active', 'killed'):
+ add_fixture(id=image_id, name='%s backup' % status,
+ is_public=False, status=status,
+ properties=backup_properties)
+ image_id += 1
+
+ # Backup for User 2
+ other_backup_properties = {'instance_id': '43', 'user_id': '2'}
+ add_fixture(id=image_id, name='someone elses backup', is_public=False,
+ status='active', properties=other_backup_properties)
+ image_id += 1
+
+ # Image without a name
+ add_fixture(id=image_id, is_public=True, status='active',
+ properties={})
+ image_id += 1
+
+ return fixtures
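
The v1.1 index expectations and the not-owned 404 above both follow from the visibility rule that _applicable_fixture encodes: an image is listed for a requester when it is public or when its 'user_id' property matches the requester. A short sketch of that rule against hand-made data (the fixture ids mirror the ones built above; this is an illustration, not the stubbed Glance service):

    def visible_to(image, user_id):
        """Public images are always visible; private ones only to their owner."""
        try:
            owner = int(image['properties']['user_id'])
        except (KeyError, ValueError):
            owner = None
        return image['is_public'] or owner == user_id

    fixtures = [
        {'id': 123, 'is_public': True, 'properties': {}},
        {'id': 124, 'is_public': False, 'properties': {'user_id': '1'}},
        {'id': 128, 'is_public': False, 'properties': {'user_id': '2'}},
    ]

    assert [f['id'] for f in fixtures if visible_to(f, 1)] == [123, 124]
    assert [f['id'] for f in fixtures if visible_to(f, 2)] == [123, 128]
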
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
new file mode 100644
index 000000000..05cfacc60
--- /dev/null
+++ b/nova/tests/api/openstack/test_limits.py
@@ -0,0 +1,584 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests dealing with HTTP rate-limiting.
+"""
+
+import httplib
+import json
+import StringIO
+import stubout
+import time
+import unittest
+import webob
+
+from xml.dom.minidom import parseString
+
+from nova.api.openstack import limits
+from nova.api.openstack.limits import Limit
+
+
+TEST_LIMITS = [
+ Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
+ Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
+ Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE),
+ Limit("PUT", "*", "", 10, limits.PER_MINUTE),
+ Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE),
+]
+
+
+class BaseLimitTestSuite(unittest.TestCase):
+ """Base test suite which provides relevant stubs and time abstraction."""
+
+ def setUp(self):
+ """Run before each test."""
+ self.time = 0.0
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(limits.Limit, "_get_time", self._get_time)
+
+ def tearDown(self):
+ """Run after each test."""
+ self.stubs.UnsetAll()
+
+ def _get_time(self):
+ """Return the "time" according to this test suite."""
+ return self.time
+
+
+class LimitsControllerTest(BaseLimitTestSuite):
+ """
+ Tests for `limits.LimitsController` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.controller = limits.LimitsController()
+
+ def _get_index_request(self, accept_header="application/json"):
+ """Helper to set routing arguments."""
+ request = webob.Request.blank("/")
+ request.accept = accept_header
+ request.environ["wsgiorg.routing_args"] = (None, {
+ "action": "index",
+ "controller": "",
+ })
+ return request
+
+ def _populate_limits(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ Limit("GET", "*", ".*", 10, 60).display(),
+ Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_empty_index_json(self):
+ """Test getting empty limit details in JSON."""
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_index_json(self):
+ """Test getting limit details in JSON."""
+ request = self._get_index_request()
+ request = self._populate_limits(request)
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [{
+ "regex": ".*",
+ "resetTime": 0,
+ "URI": "*",
+ "value": 10,
+ "verb": "GET",
+ "remaining": 10,
+ "unit": "MINUTE",
+ },
+ {
+ "regex": ".*",
+ "resetTime": 0,
+ "URI": "*",
+ "value": 5,
+ "verb": "POST",
+ "remaining": 5,
+ "unit": "HOUR",
+ }],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_empty_index_xml(self):
+ """Test getting limit details in XML."""
+ request = self._get_index_request("application/xml")
+ response = request.get_response(self.controller)
+
+ expected = "<limits><rate/><absolute/></limits>"
+ body = response.body.replace("\n", "").replace(" ", "")
+
+ self.assertEqual(expected, body)
+
+ def test_index_xml(self):
+ """Test getting limit details in XML."""
+ request = self._get_index_request("application/xml")
+ request = self._populate_limits(request)
+ response = request.get_response(self.controller)
+
+ expected = parseString("""
+ <limits>
+ <rate>
+ <limit URI="*" regex=".*" remaining="10" resetTime="0"
+ unit="MINUTE" value="10" verb="GET"/>
+ <limit URI="*" regex=".*" remaining="5" resetTime="0"
+ unit="HOUR" value="5" verb="POST"/>
+ </rate>
+ <absolute/>
+ </limits>
+ """.replace(" ", ""))
+ body = parseString(response.body.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), body.toxml())
+
+
+class LimitMiddlewareTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.RateLimitingMiddleware` class.
+ """
+
+ @webob.dec.wsgify
+ def _empty_app(self, request):
+ """Do-nothing WSGI app."""
+ pass
+
+ def setUp(self):
+ """Prepare middleware for use through fake WSGI app."""
+ BaseLimitTestSuite.setUp(self)
+ _limits = [
+ Limit("GET", "*", ".*", 1, 60),
+ ]
+ self.app = limits.RateLimitingMiddleware(self._empty_app, _limits)
+
+ def test_good_request(self):
+ """Test successful GET request through middleware."""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ def test_limited_request_json(self):
+ """Test a rate-limited (403) GET request through middleware."""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 403)
+
+ body = json.loads(response.body)
+ expected = "Only 1 GET request(s) can be made to * every minute."
+ value = body["overLimitFault"]["details"].strip()
+ self.assertEqual(value, expected)
+
+ def test_limited_request_xml(self):
+ """Test a rate-limited (403) response as XML"""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ request.accept = "application/xml"
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 403)
+
+ root = parseString(response.body).childNodes[0]
+ expected = "Only 1 GET request(s) can be made to * every minute."
+
+ details = root.getElementsByTagName("details")
+ self.assertEqual(details.length, 1)
+
+ value = details.item(0).firstChild.data.strip()
+ self.assertEqual(value, expected)
+
+
+class LimitTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.Limit` class.
+ """
+
+ def test_GET_no_delay(self):
+ """Test a limit handles 1 GET per second."""
+ limit = Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+ self.assertEqual(0, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ def test_GET_delay(self):
+ """Test two calls to 1 GET per second limit."""
+ limit = Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+
+ delay = limit("GET", "/anything")
+ self.assertEqual(1, delay)
+ self.assertEqual(1, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ self.time += 4
+
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+ self.assertEqual(4, limit.next_request)
+ self.assertEqual(4, limit.last_request)
+
+
+class LimiterTest(BaseLimitTestSuite):
+ """
+ Tests for the in-memory `limits.Limiter` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.limiter = limits.Limiter(TEST_LIMITS)
+
+ def _check(self, num, verb, url, username=None):
+ """Check and yield results from checks."""
+ for x in xrange(num):
+ yield self.limiter.check_for_delay(verb, url, username)[0]
+
+ def _check_sum(self, num, verb, url, username=None):
+ """Check and sum results from checks."""
+ results = self._check(num, verb, url, username)
+ return sum(item for item in results if item)
+
+ def test_no_delay_GET(self):
+ """
+ Simple test to ensure no delay on a single call for a limit verb we
+ didn"t set.
+ """
+ delay = self.limiter.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_no_delay_PUT(self):
+ """
+ Simple test to ensure no delay on a single call for a known limit.
+ """
+ delay = self.limiter.check_for_delay("PUT", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_delay_PUT(self):
+ """
+        Ensure the 11th PUT results in a delay of 6.0 seconds before
+        the next request is granted.
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+
+ self.assertEqual(expected, results)
+
+ def test_delay_POST(self):
+ """
+        Ensure the 8th POST results in a delay of 60/7 (~8.57) seconds
+        before the next request is granted.
+ """
+ expected = [None] * 7
+ results = list(self._check(7, "POST", "/anything"))
+ self.assertEqual(expected, results)
+
+ expected = 60.0 / 7.0
+ results = self._check_sum(1, "POST", "/anything")
+ self.failUnlessAlmostEqual(expected, results, 8)
+
+ def test_delay_GET(self):
+ """
+ Ensure the 11th GET will result in NO delay.
+ """
+ expected = [None] * 11
+ results = list(self._check(11, "GET", "/anything"))
+
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_servers(self):
+ """
+        Ensure PUT on /servers is limited to 5 requests, and PUT elsewhere is
+        still OK after those 5 requests; after 11 total PUT requests, the
+        global PUT limit kicks in.
+ """
+ # First 6 requests on PUT /servers
+ expected = [None] * 5 + [12.0]
+ results = list(self._check(6, "PUT", "/servers"))
+ self.assertEqual(expected, results)
+
+ # Next 5 request on PUT /anything
+ expected = [None] * 4 + [6.0]
+ results = list(self._check(5, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_wait(self):
+ """
+ Ensure after hitting the limit and then waiting for the correct
+ amount of time, the limit will be lifted.
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ # Advance time
+ self.time += 6.0
+
+ expected = [None, 6.0]
+ results = list(self._check(2, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_multiple_delays(self):
+ """
+ Ensure multiple requests still get a delay.
+ """
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_multiple_users(self):
+ """
+ Tests involving multiple users.
+ """
+ # User1
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ # User2
+ expected = [None] * 10 + [6.0] * 5
+ results = list(self._check(15, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ # User1 again
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ # User1 again
+ expected = [4.0] * 5
+ results = list(self._check(5, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+
+class WsgiLimiterTest(BaseLimitTestSuite):
+ """
+ Tests for `limits.WsgiLimiter` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+
+ def _request_data(self, verb, path):
+ """Get data decribing a limit request verb/path."""
+ return json.dumps({"verb": verb, "path": path})
+
+ def _request(self, verb, url, username=None):
+ """Make sure that POSTing to the given url causes the given username
+ to perform the given action. Make the internal rate limiter return
+ delay and make sure that the WSGI app returns the correct response.
+ """
+ if username:
+ request = webob.Request.blank("/%s" % username)
+ else:
+ request = webob.Request.blank("/")
+
+ request.method = "POST"
+ request.body = self._request_data(verb, url)
+ response = request.get_response(self.app)
+
+ if "X-Wait-Seconds" in response.headers:
+ self.assertEqual(response.status_int, 403)
+ return response.headers["X-Wait-Seconds"]
+
+ self.assertEqual(response.status_int, 204)
+
+ def test_invalid_methods(self):
+ """Only POSTs should work."""
+ requests = []
+ for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
+ request = webob.Request.blank("/")
+ request.body = self._request_data("GET", "/something")
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 405)
+
+ def test_good_url(self):
+ delay = self._request("GET", "/something")
+ self.assertEqual(delay, None)
+
+ def test_escaping(self):
+ delay = self._request("GET", "/something/jump%20up")
+ self.assertEqual(delay, None)
+
+ def test_response_to_delays(self):
+ delay = self._request("GET", "/delayed")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed")
+ self.assertEqual(delay, '60.00')
+
+ def test_response_to_delays_usernames(self):
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertEqual(delay, '60.00')
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertEqual(delay, '60.00')
+
+
+class FakeHttplibSocket(object):
+ """
+ Fake `httplib.HTTPResponse` replacement.
+ """
+
+ def __init__(self, response_string):
+ """Initialize new `FakeHttplibSocket`."""
+ self._buffer = StringIO.StringIO(response_string)
+
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer."""
+ return self._buffer
+
+
+class FakeHttplibConnection(object):
+ """
+ Fake `httplib.HTTPConnection`.
+ """
+
+ def __init__(self, app, host):
+ """
+ Initialize `FakeHttplibConnection`.
+ """
+ self.app = app
+ self.host = host
+
+ def request(self, method, path, body="", headers={}):
+ """
+ Requests made via this connection actually get translated and routed
+        into our WSGI app; we then wait for the response and turn it back into
+ an `httplib.HTTPResponse`.
+ """
+ req = webob.Request.blank(path)
+ req.method = method
+ req.headers = headers
+ req.host = self.host
+ req.body = body
+
+ resp = str(req.get_response(self.app))
+ resp = "HTTP/1.0 %s" % resp
+ sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(sock)
+ self.http_response.begin()
+
+ def getresponse(self):
+ """Return our generated response from the request."""
+ return self.http_response
+
+
+def wire_HTTPConnection_to_WSGI(host, app):
+ """Monkeypatches HTTPConnection so that if you try to connect to host, you
+ are instead routed straight to the given WSGI app.
+
+ After calling this method, when any code calls
+
+ httplib.HTTPConnection(host)
+
+ the connection object will be a fake. Its requests will be sent directly
+ to the given WSGI app rather than through a socket.
+
+ Code connecting to hosts other than host will not be affected.
+
+ This method may be called multiple times to map different hosts to
+ different apps.
+ """
+ class HTTPConnectionDecorator(object):
+ """Wraps the real HTTPConnection class so that when you instantiate
+ the class you might instead get a fake instance."""
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+
+ def __call__(self, connection_host, *args, **kwargs):
+ if connection_host == host:
+ return FakeHttplibConnection(app, host)
+ else:
+ return self.wrapped(connection_host, *args, **kwargs)
+
+ httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
+
+
+class WsgiLimiterProxyTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.WsgiLimiterProxy` class.
+ """
+
+ def setUp(self):
+ """
+ Do some nifty HTTP/WSGI magic which allows for WSGI to be called
+ directly by something like the `httplib` library.
+ """
+ BaseLimitTestSuite.setUp(self)
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+ wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)
+ self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
+
+ def test_200(self):
+ """Successful request test."""
+ delay = self.proxy.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_403(self):
+ """Forbidden request test."""
+ delay = self.proxy.check_for_delay("GET", "/delayed")
+ self.assertEqual(delay, (None, None))
+
+ delay, error = self.proxy.check_for_delay("GET", "/delayed")
+ error = error.strip()
+
+ expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "\
+ "made to /delayed every minute.")
+
+ self.assertEqual((delay, error), expected)
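
The delay values asserted throughout these limiter tests fall out of leaky-bucket arithmetic: each request adds unit/value units of "water" to a bucket that holds `unit` seconds' worth and drains one unit per second, so a 10-per-minute PUT limit absorbs a burst of ten requests and pushes the 11th out by 60/10 = 6.0 seconds, and a 7-per-minute POST limit delays the 8th by 60/7 ≈ 8.57 seconds. A hedged sketch of that arithmetic (an illustration, not the nova.api.openstack.limits implementation):

    class LeakyBucketLimit(object):
        def __init__(self, value, unit_seconds):
            self.capacity = float(unit_seconds)     # bucket size, e.g. 60.0
            self.cost = self.capacity / value       # water added per request
            self.level = 0.0
            self.last = 0.0

        def check(self, now):
            """Return None if the request fits, else the delay in seconds."""
            self.level = max(self.level - (now - self.last), 0.0)   # drain
            self.last = now
            overflow = self.level + self.cost - self.capacity
            if overflow > 0:
                return overflow                     # caller must wait this long
            self.level += self.cost
            return None

    put_limit = LeakyBucketLimit(10, 60)            # 10 PUTs per minute
    results = [put_limit.check(0.0) for _ in range(11)]
    assert results == [None] * 10 + [6.0]
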
diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py
deleted file mode 100644
index 9ae90ee20..000000000
--- a/nova/tests/api/openstack/test_ratelimiting.py
+++ /dev/null
@@ -1,243 +0,0 @@
-import httplib
-import StringIO
-import time
-import webob
-
-from nova import test
-import nova.api.openstack.ratelimiting as ratelimiting
-
-
-class LimiterTest(test.TestCase):
-
- def setUp(self):
- super(LimiterTest, self).setUp()
- self.limits = {
- 'a': (5, ratelimiting.PER_SECOND),
- 'b': (5, ratelimiting.PER_MINUTE),
- 'c': (5, ratelimiting.PER_HOUR),
- 'd': (1, ratelimiting.PER_SECOND),
- 'e': (100, ratelimiting.PER_SECOND)}
- self.rl = ratelimiting.Limiter(self.limits)
-
- def exhaust(self, action, times_until_exhausted, **kwargs):
- for i in range(times_until_exhausted):
- when = self.rl.perform(action, **kwargs)
- self.assertEqual(when, None)
- num, period = self.limits[action]
- delay = period * 1.0 / num
- # Verify that we are now thoroughly delayed
- for i in range(10):
- when = self.rl.perform(action, **kwargs)
- self.assertAlmostEqual(when, delay, 2)
-
- def test_second(self):
- self.exhaust('a', 5)
- time.sleep(0.2)
- self.exhaust('a', 1)
- time.sleep(1)
- self.exhaust('a', 5)
-
- def test_minute(self):
- self.exhaust('b', 5)
-
- def test_one_per_period(self):
- def allow_once_and_deny_once():
- when = self.rl.perform('d')
- self.assertEqual(when, None)
- when = self.rl.perform('d')
- self.assertAlmostEqual(when, 1, 2)
- return when
- time.sleep(allow_once_and_deny_once())
- time.sleep(allow_once_and_deny_once())
- allow_once_and_deny_once()
-
- def test_we_can_go_indefinitely_if_we_spread_out_requests(self):
- for i in range(200):
- when = self.rl.perform('e')
- self.assertEqual(when, None)
- time.sleep(0.01)
-
- def test_users_get_separate_buckets(self):
- self.exhaust('c', 5, username='alice')
- self.exhaust('c', 5, username='bob')
- self.exhaust('c', 5, username='chuck')
- self.exhaust('c', 0, username='chuck')
- self.exhaust('c', 0, username='bob')
- self.exhaust('c', 0, username='alice')
-
-
-class FakeLimiter(object):
- """Fake Limiter class that you can tell how to behave."""
-
- def __init__(self, test):
- self._action = self._username = self._delay = None
- self.test = test
-
- def mock(self, action, username, delay):
- self._action = action
- self._username = username
- self._delay = delay
-
- def perform(self, action, username):
- self.test.assertEqual(action, self._action)
- self.test.assertEqual(username, self._username)
- return self._delay
-
-
-class WSGIAppTest(test.TestCase):
-
- def setUp(self):
- super(WSGIAppTest, self).setUp()
- self.limiter = FakeLimiter(self)
- self.app = ratelimiting.WSGIApp(self.limiter)
-
- def test_invalid_methods(self):
- requests = []
- for method in ['GET', 'PUT', 'DELETE']:
- req = webob.Request.blank('/limits/michael/breakdance',
- dict(REQUEST_METHOD=method))
- requests.append(req)
- for req in requests:
- self.assertEqual(req.get_response(self.app).status_int, 405)
-
- def test_invalid_urls(self):
- requests = []
- for prefix in ['limit', '', 'limiter2', 'limiter/limits', 'limiter/1']:
- req = webob.Request.blank('/%s/michael/breakdance' % prefix,
- dict(REQUEST_METHOD='POST'))
- requests.append(req)
- for req in requests:
- self.assertEqual(req.get_response(self.app).status_int, 404)
-
- def verify(self, url, username, action, delay=None):
- """Make sure that POSTing to the given url causes the given username
- to perform the given action. Make the internal rate limiter return
- delay and make sure that the WSGI app returns the correct response.
- """
- req = webob.Request.blank(url, dict(REQUEST_METHOD='POST'))
- self.limiter.mock(action, username, delay)
- resp = req.get_response(self.app)
- if not delay:
- self.assertEqual(resp.status_int, 200)
- else:
- self.assertEqual(resp.status_int, 403)
- self.assertEqual(resp.headers['X-Wait-Seconds'], "%.2f" % delay)
-
- def test_good_urls(self):
- self.verify('/limiter/michael/hoot', 'michael', 'hoot')
-
- def test_escaping(self):
- self.verify('/limiter/michael/jump%20up', 'michael', 'jump up')
-
- def test_response_to_delays(self):
- self.verify('/limiter/michael/hoot', 'michael', 'hoot', 1)
- self.verify('/limiter/michael/hoot', 'michael', 'hoot', 1.56)
- self.verify('/limiter/michael/hoot', 'michael', 'hoot', 1000)
-
-
-class FakeHttplibSocket(object):
- """a fake socket implementation for httplib.HTTPResponse, trivial"""
-
- def __init__(self, response_string):
- self._buffer = StringIO.StringIO(response_string)
-
- def makefile(self, _mode, _other):
- """Returns the socket's internal buffer"""
- return self._buffer
-
-
-class FakeHttplibConnection(object):
- """A fake httplib.HTTPConnection
-
- Requests made via this connection actually get translated and routed into
- our WSGI app, we then wait for the response and turn it back into
- an httplib.HTTPResponse.
- """
- def __init__(self, app, host, is_secure=False):
- self.app = app
- self.host = host
-
- def request(self, method, path, data='', headers={}):
- req = webob.Request.blank(path)
- req.method = method
- req.body = data
- req.headers = headers
- req.host = self.host
- # Call the WSGI app, get the HTTP response
- resp = str(req.get_response(self.app))
- # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
- # guess that's a function the web server usually provides.
- resp = "HTTP/1.0 %s" % resp
- sock = FakeHttplibSocket(resp)
- self.http_response = httplib.HTTPResponse(sock)
- self.http_response.begin()
-
- def getresponse(self):
- return self.http_response
-
-
-def wire_HTTPConnection_to_WSGI(host, app):
- """Monkeypatches HTTPConnection so that if you try to connect to host, you
- are instead routed straight to the given WSGI app.
-
- After calling this method, when any code calls
-
- httplib.HTTPConnection(host)
-
- the connection object will be a fake. Its requests will be sent directly
- to the given WSGI app rather than through a socket.
-
- Code connecting to hosts other than host will not be affected.
-
- This method may be called multiple times to map different hosts to
- different apps.
- """
- class HTTPConnectionDecorator(object):
- """Wraps the real HTTPConnection class so that when you instantiate
- the class you might instead get a fake instance."""
-
- def __init__(self, wrapped):
- self.wrapped = wrapped
-
- def __call__(self, connection_host, *args, **kwargs):
- if connection_host == host:
- return FakeHttplibConnection(app, host)
- else:
- return self.wrapped(connection_host, *args, **kwargs)
-
- httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
-
-
-class WSGIAppProxyTest(test.TestCase):
-
- def setUp(self):
- """Our WSGIAppProxy is going to call across an HTTPConnection to a
- WSGIApp running a limiter. The proxy will send input, and the proxy
- should receive that same input, pass it to the limiter who gives a
- result, and send the expected result back.
-
- The HTTPConnection isn't real -- it's monkeypatched to point straight
- at the WSGIApp. And the limiter isn't real -- it's a fake that
- behaves the way we tell it to.
- """
- super(WSGIAppProxyTest, self).setUp()
- self.limiter = FakeLimiter(self)
- app = ratelimiting.WSGIApp(self.limiter)
- wire_HTTPConnection_to_WSGI('100.100.100.100:80', app)
- self.proxy = ratelimiting.WSGIAppProxy('100.100.100.100:80')
-
- def test_200(self):
- self.limiter.mock('conquer', 'caesar', None)
- when = self.proxy.perform('conquer', 'caesar')
- self.assertEqual(when, None)
-
- def test_403(self):
- self.limiter.mock('grumble', 'proletariat', 1.5)
- when = self.proxy.perform('grumble', 'proletariat')
- self.assertEqual(when, 1.5)
-
- def test_failure(self):
- def shouldRaise():
- self.limiter.mock('murder', 'brutus', None)
- self.proxy.perform('stab', 'brutus')
- self.assertRaises(AssertionError, shouldRaise)
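
Both the removed module above and the new test_limits.py carry the same wire_HTTPConnection_to_WSGI helper; the intended use is to point a hostname at a WSGI app and then speak ordinary httplib to that host. A hypothetical usage sketch (the trivial webob app below is an assumption standing in for the limiter app the tests actually wire up):

    import httplib

    import webob
    import webob.dec

    @webob.dec.wsgify
    def hello_app(request):
        # Stand-in WSGI app; WsgiLimiterProxyTest wires limits.WsgiLimiter here.
        return webob.Response(body="hello")

    wire_HTTPConnection_to_WSGI("fake.host:80", hello_app)

    conn = httplib.HTTPConnection("fake.host:80")   # actually a fake connection
    conn.request("GET", "/anything")
    response = conn.getresponse()
    assert response.status == 200
    assert response.read() == "hello"
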
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
new file mode 100644
index 000000000..c8d456472
--- /dev/null
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -0,0 +1,164 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+
+def return_create_instance_metadata(context, server_id, metadata):
+ return stub_server_metadata()
+
+
+def return_server_metadata(context, server_id):
+ return stub_server_metadata()
+
+
+def return_empty_server_metadata(context, server_id):
+ return {}
+
+
+def delete_server_metadata(context, server_id, key):
+ pass
+
+
+def stub_server_metadata():
+ metadata = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"
+ }
+ return metadata
+
+
+class ServerMetaDataTest(unittest.TestCase):
+
+ def setUp(self):
+ super(ServerMetaDataTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(ServerMetaDataTest, self).tearDown()
+
+ def test_index(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_empty_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual(0, len(res_dict['metadata']))
+
+ def test_show(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_meta_not_found(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_empty_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key6')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(404, res.status_int)
+
+ def test_delete(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_delete',
+ delete_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = '{"metadata": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_too_many_keys(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/bad')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
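
Every test in this new module fakes the database layer the same way: stubout's StubOutForTesting.Set swaps a module attribute (here one of the nova.db.api metadata functions) for the duration of the test, and UnsetAll in tearDown restores the original. A small sketch of that pattern, with a toy replacement function standing in for the stubs used above:

    import stubout

    import nova.db.api

    def fake_instance_metadata_get(context, server_id):
        # Toy replacement; the tests above return a richer stub dictionary.
        return {"key1": "value1"}

    stubs = stubout.StubOutForTesting()
    stubs.Set(nova.db.api, 'instance_metadata_get', fake_instance_metadata_get)
    assert nova.db.api.instance_metadata_get(None, 1) == {"key1": "value1"}
    stubs.UnsetAll()    # the real nova.db.api.instance_metadata_get is back
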
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 7a25abe9d..313676e72 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 OpenStack LLC.
+# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,21 +15,28 @@
# License for the specific language governing permissions and limitations
# under the License.
+import base64
import datetime
import json
+import unittest
+from xml.dom import minidom
import stubout
import webob
+from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import test
import nova.api.openstack
from nova.api.openstack import servers
+import nova.compute.api
import nova.db.api
from nova.db.sqlalchemy.models import Instance
from nova.db.sqlalchemy.models import InstanceMetadata
import nova.rpc
+from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
@@ -76,7 +83,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
"admin_pass": "",
"user_id": user_id,
"project_id": "",
- "image_id": 10,
+ "image_id": "10",
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
@@ -89,7 +96,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
"local_gb": 0,
"hostname": "",
"host": None,
- "instance_type": "",
+ "instance_type": "1",
"user_data": "",
"reservation_id": "",
"mac_address": "",
@@ -118,7 +125,7 @@ class ServersTest(test.TestCase):
def setUp(self):
super(ServersTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -144,6 +151,8 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
+ self.webreq = common.webob_factory('/v1.0/servers')
+
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
@@ -153,9 +162,36 @@ class ServersTest(test.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['id'], '1')
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+
+ def test_get_server_by_id_v11(self):
+ req = webob.Request.blank('/v1.1/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
+ expected_links = [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/servers/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/servers/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/servers/1",
+ },
+ ]
+
+ print res_dict['server']
+ self.assertEqual(res_dict['server']['links'], expected_links)
+
def test_get_server_by_id_with_addresses(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
@@ -164,7 +200,7 @@ class ServersTest(test.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['id'], '1')
+ self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
addresses = res_dict['server']['addresses']
self.assertEqual(len(addresses["public"]), len(public))
@@ -172,6 +208,24 @@ class ServersTest(test.TestCase):
self.assertEqual(len(addresses["private"]), 1)
self.assertEqual(addresses["private"][0], private)
+ def test_get_server_by_id_with_addresses_v11(self):
+ private = "192.168.0.3"
+ public = ["1.2.3.4"]
+ new_return_server = return_server_with_addresses(private, public)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+ req = webob.Request.blank('/v1.1/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+ addresses = res_dict['server']['addresses']
+ self.assertEqual(len(addresses["public"]), len(public))
+ self.assertEqual(addresses["public"][0],
+ {"version": 4, "addr": public[0]})
+ self.assertEqual(len(addresses["private"]), 1)
+ self.assertEqual(addresses["private"][0],
+ {"version": 4, "addr": private})
+
def test_get_server_list(self):
req = webob.Request.blank('/v1.0/servers')
res = req.get_response(fakes.wsgi_app())
@@ -184,9 +238,97 @@ class ServersTest(test.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
- def test_create_instance(self):
+ def test_get_server_list_v11(self):
+ req = webob.Request.blank('/v1.1/servers')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s.get('imageId', None), None)
+
+ expected_links = [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/servers/%d" % (i,),
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/servers/%d" % (i,),
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/servers/%d" % (i,),
+ },
+ ]
+
+ self.assertEqual(s['links'], expected_links)
+
+ def test_get_servers_with_limit(self):
+ req = webob.Request.blank('/v1.0/servers?limit=3')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [0, 1, 2])
+
+ req = webob.Request.blank('/v1.0/servers?limit=aaa')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue('limit' in res.body)
+
+ def test_get_servers_with_offset(self):
+ req = webob.Request.blank('/v1.0/servers?offset=2')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [2, 3, 4])
+
+ req = webob.Request.blank('/v1.0/servers?offset=aaa')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue('offset' in res.body)
+
+ def test_get_servers_with_limit_and_offset(self):
+ req = webob.Request.blank('/v1.0/servers?limit=2&offset=1')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [1, 2])
+
+ def test_get_servers_with_bad_limit(self):
+ req = webob.Request.blank('/v1.0/servers?limit=asdf&offset=1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(res.body.find('limit param') > -1)
+
+ def test_get_servers_with_bad_offset(self):
+ req = webob.Request.blank('/v1.0/servers?limit=2&offset=asdf')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(res.body.find('offset param') > -1)
+
+ def test_get_servers_with_marker(self):
+ req = webob.Request.blank('/v1.1/servers?marker=2')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [3, 4])
+
+ def test_get_servers_with_limit_and_marker(self):
+ req = webob.Request.blank('/v1.1/servers?limit=2&marker=1')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [2, 3])
+
+ def test_get_servers_with_bad_marker(self):
+ req = webob.Request.blank('/v1.1/servers?limit=2&marker=asdf')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(res.body.find('marker param') > -1)
+
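The limit, offset and marker tests above pin down the paging contract: v1.0 slices with ?limit= and ?offset=, v1.1 pages with ?marker= (the id of the last item already returned), and malformed values yield a 400. A minimal standalone sketch consistent with those assertions follows; the helper name `paginate` and its signature are assumptions made for illustration, not the controller code this change exercises.

def paginate(items, limit=None, offset=0, marker=None):
    """Slice items the way the v1.0 (limit/offset) and v1.1 (marker) tests expect."""
    if marker is not None:
        # v1.1 paging: continue after the item whose id equals the marker.
        ids = [item['id'] for item in items]
        if marker not in ids:
            raise ValueError('marker param not found')
        offset = ids.index(marker) + 1
    if limit is None:
        return items[offset:]
    return items[offset:offset + limit]

# With five servers whose ids are 0..4:
#   paginate(servers, limit=3)           -> ids [0, 1, 2]
#   paginate(servers, offset=2)          -> ids [2, 3, 4]
#   paginate(servers, limit=2, offset=1) -> ids [1, 2]
#   paginate(servers, limit=2, marker=1) -> ids [2, 3]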
+ def _setup_for_create_instance(self):
+ """Shared implementation for tests below that create instance"""
def instance_create(context, inst):
- return {'id': '1', 'display_name': ''}
+ return {'id': '1', 'display_name': 'server_test'}
def server_update(context, id, params):
return instance_create(context, id)
@@ -220,87 +362,322 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.api.openstack.common,
"get_image_id_from_image_hash", image_id_from_hash)
+ def _test_create_instance_helper(self):
+ self._setup_for_create_instance()
+
body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2,
+ name='server_test', imageId=3, flavorId=2,
metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.0/servers')
req.method = 'POST'
req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+
+ server = json.loads(res.body)['server']
+ self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual('server_test', server['name'])
+ self.assertEqual(1, server['id'])
+ self.assertEqual(2, server['flavorId'])
+ self.assertEqual(3, server['imageId'])
+ self.assertEqual(res.status_int, 200)
+
+ def test_create_instance(self):
+ self._test_create_instance_helper()
+
+ def test_create_instance_no_key_pair(self):
+ fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
+ self._test_create_instance_helper()
+
+ def test_create_instance_no_name(self):
+ self._setup_for_create_instance()
+
+ body = {
+ 'server': {
+ 'imageId': 3,
+ 'flavorId': 1,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_nonstring_name(self):
+ self._setup_for_create_instance()
+
+ body = {
+ 'server': {
+ 'name': 12,
+ 'imageId': 3,
+ 'flavorId': 1,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_whitespace_name(self):
+ self._setup_for_create_instance()
+
+ body = {
+ 'server': {
+ 'name': ' ',
+ 'imageId': 3,
+ 'flavorId': 1,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_v11(self):
+ self._setup_for_create_instance()
+
+ imageRef = 'http://localhost/v1.1/images/2'
+ flavorRef = 'http://localhost/v1.1/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': imageRef,
+ 'flavorRef': flavorRef,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
+ server = json.loads(res.body)['server']
+ self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual('server_test', server['name'])
+ self.assertEqual(1, server['id'])
+ self.assertEqual(flavorRef, server['flavorRef'])
+ self.assertEqual(imageRef, server['imageRef'])
self.assertEqual(res.status_int, 200)
+ def test_create_instance_v11_bad_href(self):
+ self._setup_for_create_instance()
+
+ imageRef = 'http://localhost/v1.1/images/asdf'
+ flavorRef = 'http://localhost/v1.1/flavors/3'
+ body = dict(server=dict(
+ name='server_test', imageRef=imageRef, flavorRef=flavorRef,
+ metadata={'hello': 'world', 'open': 'stack'},
+ personality={}))
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_update_no_body(self):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 422)
- def test_update_bad_params(self):
+ def test_update_nonstring_name(self):
+ """ Confirm that update is filtering params """
+ inst_dict = dict(name=12, adminPass='bacon')
+ self.body = json.dumps(dict(server=inst_dict))
+
+ req = webob.Request.blank('/v1.0/servers/1')
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = self.body
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_whitespace_name(self):
+ """ Confirm that update is filtering params """
+ inst_dict = dict(name=' ', adminPass='bacon')
+ self.body = json.dumps(dict(server=inst_dict))
+
+ req = webob.Request.blank('/v1.0/servers/1')
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = self.body
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_null_name(self):
""" Confirm that update is filtering params """
- inst_dict = dict(cat='leopard', name='server_test', adminPass='bacon')
+ inst_dict = dict(name='', adminPass='bacon')
+ self.body = json.dumps(dict(server=inst_dict))
+
+ req = webob.Request.blank('/v1.0/servers/1')
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = self.body
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_server_v10(self):
+ inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
def server_update(context, id, params):
- self.update_called = True
- filtered_dict = dict(name='server_test', admin_pass='bacon')
+ filtered_dict = dict(
+ display_name='server_test',
+ admin_pass='bacon',
+ )
self.assertEqual(params, filtered_dict)
+ return filtered_dict
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
+ req.content_type = "application/json"
req.body = self.body
- req.get_response(fakes.wsgi_app())
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 204)
- def test_update_server(self):
+ def test_update_server_adminPass_ignored_v11(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
def server_update(context, id, params):
- filtered_dict = dict(name='server_test', admin_pass='bacon')
+ filtered_dict = dict(display_name='server_test')
self.assertEqual(params, filtered_dict)
+ return filtered_dict
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
- req = webob.Request.blank('/v1.0/servers/1')
+ req = webob.Request.blank('/v1.1/servers/1')
req.method = 'PUT'
+ req.content_type = "application/json"
req.body = self.body
- req.get_response(fakes.wsgi_app())
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 204)
def test_create_backup_schedules(self):
- req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule')
req.method = 'POST'
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status, '404 Not Found')
+ self.assertEqual(res.status_int, 501)
def test_delete_backup_schedules(self):
- req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule/1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status, '404 Not Found')
+ self.assertEqual(res.status_int, 501)
def test_get_server_backup_schedules(self):
- req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule')
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status, '404 Not Found')
+ self.assertEqual(res.status_int, 501)
- def test_get_all_server_details(self):
+ def test_get_server_backup_schedule(self):
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
+
+ def test_server_backup_schedule_deprecated_v11(self):
+ req = webob.Request.blank('/v1.1/servers/1/backup_schedule')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_all_server_details_v1_0(self):
req = webob.Request.blank('/v1.0/servers/detail')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
- i = 0
- for s in res_dict['servers']:
+ for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % i)
- self.assertEqual(s['imageId'], 10)
+ self.assertEqual(s['imageId'], '10')
+ self.assertEqual(s['flavorId'], '1')
+ self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], i)
- i += 1
+
+ def test_get_all_server_details_v1_1(self):
+ req = webob.Request.blank('/v1.1/servers/detail')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], '')
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10')
+ self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1')
+ self.assertEqual(s['status'], 'BUILD')
+ self.assertEqual(s['metadata']['seq'], i)
+
+ def test_get_all_server_details_with_host(self):
+ '''
+ Instances on the same host should report the same hostId, and instances
+ on different hosts should report different hostIds. This test uses five
+ instances: two on one host and three on the other.
+ '''
+
+ def stub_instance(id, user_id=1):
+ return Instance(id=id, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id, host='host%s' % (id % 2))
+
+ def return_servers_with_host(context, user_id=1):
+ return [stub_instance(i) for i in xrange(5)]
+
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
+ return_servers_with_host)
+
+ req = webob.Request.blank('/v1.0/servers/detail')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ server_list = res_dict['servers']
+ host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+ self.assertTrue(host_ids[0] and host_ids[1])
+ self.assertNotEqual(host_ids[0], host_ids[1])
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], host_ids[i % 2])
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s['imageId'], 10)
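The docstring above states the invariant: equal hosts give equal hostIds, distinct hosts give distinct ones, and the test never inspects how the id is built. One way to satisfy that invariant, purely as an illustration (sha224 of the host name is an assumption, not the algorithm used by the servers view):

import hashlib

def host_id_for(host):
    # Empty when the instance has not been scheduled to a host yet,
    # otherwise a stable digest so equal hosts map to equal ids.
    if not host:
        return ''
    return hashlib.sha224(host).hexdigest()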
def test_server_pause(self):
FLAGS.allow_admin_api = True
@@ -367,7 +744,8 @@ class ServersTest(test.TestCase):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
- req = webob.Request.blank('/v1.0/servers/1/inject_network_info')
+ req = webob.Request.blank(
+ '/v1.0/servers/1/inject_network_info')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
@@ -386,17 +764,75 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
- def test_server_reboot(self):
- body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2, metadata={},
- personality={}))
+ def test_server_change_password(self):
+ body = {'changePassword': {'adminPass': '1234pass'}}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
- def test_server_rebuild(self):
+ def test_server_change_password_v1_1(self):
+
+ class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance_id, password):
+ self.instance_id = instance_id
+ self.password = password
+
+ mock_method = MockSetAdminPassword()
+ self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
+ body = {'changePassword': {'adminPass': '1234pass'}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(mock_method.instance_id, '1')
+ self.assertEqual(mock_method.password, '1234pass')
+
+ def test_server_change_password_bad_request_v1_1(self):
+ body = {'changePassword': {'pass': '12345'}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_change_password_empty_string_v1_1(self):
+ body = {'changePassword': {'adminPass': ''}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_change_password_none_v1_1(self):
+ body = {'changePassword': {'adminPass': None}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_change_password_not_a_string_v1_1(self):
+ body = {'changePassword': {'adminPass': 1234}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -406,7 +842,7 @@ class ServersTest(test.TestCase):
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- def test_server_resize(self):
+ def test_server_rebuild(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -431,3 +867,694 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status, '202 Accepted')
self.assertEqual(self.server_delete_called, True)
+
+ def test_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
+ def test_resize_bad_flavor_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3)))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 422)
+ self.assertEqual(self.resize_called, False)
+
+ def test_resize_raises_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
+
+ def resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_resized_server_has_correct_status(self):
+ req = self.webreq('/1', 'GET', dict(resize=dict(flavorId=3)))
+
+ def fake_migration_get(*args):
+ return {}
+
+ self.stubs.Set(nova.db, 'migration_get_by_instance_and_status',
+ fake_migration_get)
+ res = req.get_response(fakes.wsgi_app())
+ body = json.loads(res.body)
+ self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM')
+
+ def test_confirm_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
+
+ self.resize_called = False
+
+ def confirm_resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'confirm_resize',
+ confirm_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 204)
+ self.assertEqual(self.resize_called, True)
+
+ def test_confirm_resize_server_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
+
+ def confirm_resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'confirm_resize',
+ confirm_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_revert_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(revertResize=None))
+
+ self.resize_called = False
+
+ def revert_resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'revert_resize',
+ revert_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
+ def test_revert_resize_server_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(revertResize=None))
+
+ def revert_resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'revert_resize',
+ revert_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+
+class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
+
+ def setUp(self):
+ self.deserializer = servers.ServerCreateRequestXMLDeserializer()
+
+ def test_minimal_request(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata_and_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata/>
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata_and_personality_reversed(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality/>
+ <metadata/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_one_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality>
+ <file path="/etc/conf">aabbccdd</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_two_personalities(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf">aabbccdd</file>
+<file path="/etc/sudoers">abcd</file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"},
+ {"path": "/etc/sudoers", "contents": "abcd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_second_personality_node_ignored(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality>
+ <file path="/etc/conf">aabbccdd</file>
+ </personality>
+ <personality>
+ <file path="/etc/ignoreme">anything</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_missing_path(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file>aabbccdd</file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_empty_contents(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf"></file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": ""}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_empty_contents_variation(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf"/></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": ""}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha">beta</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "beta"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha">beta</meta>
+ <meta key="foo">bar</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "beta", "foo": "bar"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_missing_value(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha"></meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": ""}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata_missing_value(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha"/>
+ <meta key="delta"/>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "", "delta": ""}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_missing_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta>beta</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"": "beta"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata_missing_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta>beta</meta>
+ <meta>gamma</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"": "gamma"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_duplicate_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="foo">bar</meta>
+ <meta key="foo">baz</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"foo": "baz"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_canonical_request_from_docs(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">\
+ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp\
+dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k\
+IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs\
+c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g\
+QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo\
+ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv\
+dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy\
+c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6\
+b25zLiINCg0KLVJpY2hhcmQgQmFjaA==</file>
+ </personality>
+</server>"""
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {
+ "My Server Name": "Apache1",
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": """\
+ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp\
+dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k\
+IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs\
+c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g\
+QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo\
+ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv\
+dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy\
+c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6\
+b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
+ },
+ ],
+ }}
+ request = self.deserializer.deserialize(serial_request)
+ self.assertEqual(request, expected)
+
+
+class TestServerInstanceCreation(test.TestCase):
+
+ def setUp(self):
+ super(TestServerInstanceCreation, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.allow_admin = FLAGS.allow_admin_api
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(TestServerInstanceCreation, self).tearDown()
+
+ def _setup_mock_compute_api_for_personality(self):
+
+ class MockComputeAPI(nova.compute.API):
+
+ def __init__(self):
+ self.injected_files = None
+
+ def create(self, *args, **kwargs):
+ if 'injected_files' in kwargs:
+ self.injected_files = kwargs['injected_files']
+ else:
+ self.injected_files = None
+ return [{'id': '1234', 'display_name': 'fakeinstance'}]
+
+ def set_admin_password(self, *args, **kwargs):
+ pass
+
+ def make_stub_method(canned_return):
+ def stub_method(*args, **kwargs):
+ return canned_return
+ return stub_method
+
+ compute_api = MockComputeAPI()
+ self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
+ self.stubs.Set(nova.api.openstack.servers.Controller,
+ '_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
+ self.stubs.Set(nova.api.openstack.common,
+ 'get_image_id_from_image_hash', make_stub_method(2))
+ return compute_api
+
+ def _create_personality_request_dict(self, personality_files):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageId'] = 1
+ server['flavorId'] = 1
+ if personality_files is not None:
+ personalities = []
+ for path, contents in personality_files:
+ personalities.append({'path': path, 'contents': contents})
+ server['personality'] = personalities
+ return {'server': server}
+
+ def _get_create_request_json(self, body_dict):
+ req = webob.Request.blank('/v1.0/servers')
+ req.content_type = 'application/json'
+ req.method = 'POST'
+ req.body = json.dumps(body_dict)
+ return req
+
+ def _run_create_instance_with_mock_compute_api(self, request):
+ compute_api = self._setup_mock_compute_api_for_personality()
+ response = request.get_response(fakes.wsgi_app())
+ return compute_api, response
+
+ def _format_xml_request_body(self, body_dict):
+ server = body_dict['server']
+ body_parts = []
+ body_parts.extend([
+ '<?xml version="1.0" encoding="UTF-8"?>',
+ '<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"',
+ ' name="%s" imageId="%s" flavorId="%s">' % (
+ server['name'], server['imageId'], server['flavorId'])])
+ if 'metadata' in server:
+ metadata = server['metadata']
+ body_parts.append('<metadata>')
+ for item in metadata.iteritems():
+ body_parts.append('<meta key="%s">%s</meta>' % item)
+ body_parts.append('</metadata>')
+ if 'personality' in server:
+ personalities = server['personality']
+ body_parts.append('<personality>')
+ for file in personalities:
+ item = (file['path'], file['contents'])
+ body_parts.append('<file path="%s">%s</file>' % item)
+ body_parts.append('</personality>')
+ body_parts.append('</server>')
+ return ''.join(body_parts)
+
+ def _get_create_request_xml(self, body_dict):
+ req = webob.Request.blank('/v1.0/servers')
+ req.content_type = 'application/xml'
+ req.accept = 'application/xml'
+ req.method = 'POST'
+ req.body = self._format_xml_request_body(body_dict)
+ return req
+
+ def _create_instance_with_personality_json(self, personality):
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.injected_files
+
+ def _create_instance_with_personality_xml(self, personality):
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_xml(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.injected_files
+
+ def test_create_instance_with_no_personality(self):
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality=None)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [])
+
+ def test_create_instance_with_no_personality_xml(self):
+ request, response, injected_files = \
+ self._create_instance_with_personality_xml(personality=None)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [])
+
+ def test_create_instance_with_personality(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Hello, World!"\n'
+ b64contents = base64.b64encode(contents)
+ personality = [(path, b64contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_with_personality_xml(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Hello, World!"\n'
+ b64contents = base64.b64encode(contents)
+ personality = [(path, b64contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_xml(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_with_personality_no_path(self):
+ personality = [('/remove/this/path',
+ base64.b64encode('my\n\file\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ del body_dict['server']['personality'][0]['path']
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def _test_create_instance_with_personality_no_path_xml(self):
+ personality = [('/remove/this/path',
+ base64.b64encode('my\n\file\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_xml(body_dict)
+ request.body = request.body.replace(' path="/remove/this/path"', '')
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_no_contents(self):
+ personality = [('/test/path',
+ base64.b64encode('remove\nthese\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ del body_dict['server']['personality'][0]['contents']
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_not_a_list(self):
+ personality = [('/test/path', base64.b64encode('test\ncontents\n'))]
+ body_dict = self._create_personality_request_dict(personality)
+ body_dict['server']['personality'] = \
+ body_dict['server']['personality'][0]
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_with_non_b64_content(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Oh no!"\n'
+ personality = [(path, contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(injected_files, None)
+
+ def test_create_instance_with_null_personality(self):
+ personality = None
+ body_dict = self._create_personality_request_dict(personality)
+ body_dict['server']['personality'] = None
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 200)
+
+ def test_create_instance_with_three_personalities(self):
+ files = [
+ ('/etc/sudoers', 'ALL ALL=NOPASSWD: ALL\n'),
+ ('/etc/motd', 'Enjoy your root access!\n'),
+ ('/etc/dovecot.conf', 'dovecot\nconfig\nstuff\n'),
+ ]
+ personality = []
+ for path, content in files:
+ personality.append((path, base64.b64encode(content)))
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, files)
+
+ def test_create_instance_personality_empty_content(self):
+ path = '/my/file/path'
+ contents = ''
+ personality = [(path, contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_admin_pass_json(self):
+ request, response, dummy = \
+ self._create_instance_with_personality_json(None)
+ self.assertEquals(response.status_int, 200)
+ response = json.loads(response.body)
+ self.assertTrue('adminPass' in response['server'])
+ self.assertEqual(16, len(response['server']['adminPass']))
+
+ def test_create_instance_admin_pass_xml(self):
+ request, response, dummy = \
+ self._create_instance_with_personality_xml(None)
+ self.assertEquals(response.status_int, 200)
+ dom = minidom.parseString(response.body)
+ server = dom.childNodes[0]
+ self.assertEquals(server.nodeName, 'server')
+ self.assertEqual(16, len(server.getAttribute('adminPass')))
+
+
+class TestGetKernelRamdiskFromImage(test.TestCase):
+ """
+ If we're building from an AMI-style image, we need to be able to fetch the
+ kernel and ramdisk associated with the machine image. This information is
+ stored with the image metadata and return via the ImageService.
+
+ These tests ensure that we parse the metadata return the ImageService
+ correctly and that we handle failure modes appropriately.
+ """
+
+ def test_status_not_active(self):
+ """We should only allow fetching of kernel and ramdisk information if
+ we have a 'fully-formed' image, aka 'active'
+ """
+ image_meta = {'id': 1, 'status': 'queued'}
+ self.assertRaises(exception.Invalid, self._get_k_r, image_meta)
+
+ def test_not_ami(self):
+ """Anything other than ami should return no kernel and no ramdisk"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'vhd'}}
+ kernel_id, ramdisk_id = self._get_k_r(image_meta)
+ self.assertEqual(kernel_id, None)
+ self.assertEqual(ramdisk_id, None)
+
+ def test_ami_no_kernel(self):
+ """If an ami is missing a kernel it should raise NotFound"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'ami', 'ramdisk_id': 1}}
+ self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
+
+ def test_ami_no_ramdisk(self):
+ """If an ami is missing a ramdisk it should raise NotFound"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'ami', 'kernel_id': 1}}
+ self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
+
+ def test_ami_kernel_ramdisk_present(self):
+ """Return IDs if both kernel and ramdisk are present"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'ami', 'kernel_id': 1,
+ 'ramdisk_id': 2}}
+ kernel_id, ramdisk_id = self._get_k_r(image_meta)
+ self.assertEqual(kernel_id, 1)
+ self.assertEqual(ramdisk_id, 2)
+
+ @staticmethod
+ def _get_k_r(image_meta):
+ """Rebinding function to a shorter name for convenience"""
+ kernel_id, ramdisk_id = \
+ servers.Controller._do_get_kernel_ramdisk_from_image(image_meta)
+ return kernel_id, ramdisk_id
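Taken together, the TestGetKernelRamdiskFromImage cases fully describe the expected lookup behaviour: refuse non-active images, skip non-AMI formats, and fail when either id is missing. A standalone sketch of logic that satisfies those cases is below; it is an illustration only, not the body of servers.Controller._do_get_kernel_ramdisk_from_image, which this change does not include.

from nova import exception


def get_kernel_ramdisk(image_meta):
    """Return (kernel_id, ramdisk_id) for an image metadata dict."""
    if image_meta['status'] != 'active':
        # Only fully-formed images may be used to build an instance.
        raise exception.Invalid('image is not active')

    properties = image_meta.get('properties', {})
    if properties.get('disk_format') != 'ami':
        # Non-AMI formats carry their own kernel and ramdisk.
        return None, None

    try:
        return properties['kernel_id'], properties['ramdisk_id']
    except KeyError:
        # An AMI missing either id is unusable.
        raise exception.NotFound('AMI image is missing kernel_id or ramdisk_id')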
diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index b4de2ef41..c2bd7e45a 100644
--- a/nova/tests/api/openstack/test_shared_ip_groups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -16,25 +16,49 @@
# under the License.
import stubout
+import webob
from nova import test
from nova.api.openstack import shared_ip_groups
+from nova.tests.api.openstack import fakes
class SharedIpGroupsTest(test.TestCase):
def setUp(self):
super(SharedIpGroupsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
def tearDown(self):
self.stubs.UnsetAll()
super(SharedIpGroupsTest, self).tearDown()
def test_get_shared_ip_groups(self):
- pass
+ req = webob.Request.blank('/v1.0/shared_ip_groups')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
def test_create_shared_ip_group(self):
- pass
+ req = webob.Request.blank('/v1.0/shared_ip_groups')
+ req.method = 'POST'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
+
+ def test_update_shared_ip_group(self):
+ req = webob.Request.blank('/v1.0/shared_ip_groups/12')
+ req.method = 'PUT'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
def test_delete_shared_ip_group(self):
- pass
+ req = webob.Request.blank('/v1.0/shared_ip_groups/12')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
+
+ def test_deprecated_v11(self):
+ req = webob.Request.blank('/v1.1/shared_ip_groups')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py
new file mode 100644
index 000000000..effb2f592
--- /dev/null
+++ b/nova/tests/api/openstack/test_users.py
@@ -0,0 +1,159 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import stubout
+import webob
+
+from nova import flags
+from nova import test
+from nova import utils
+from nova.api.openstack import users
+from nova.auth.manager import User, Project
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def fake_init(self):
+ self.manager = fakes.FakeAuthManager()
+
+
+def fake_admin_check(self, req):
+ return True
+
+
+class UsersTest(test.TestCase):
+ def setUp(self):
+ super(UsersTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(users.Controller, '__init__',
+ fake_init)
+ self.stubs.Set(users.Controller, '_check_admin',
+ fake_admin_check)
+ fakes.FakeAuthManager.clear_fakes()
+ fakes.FakeAuthManager.projects = dict(testacct=Project('testacct',
+ 'testacct',
+ 'id1',
+ 'test',
+ []))
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+ fakemgr = fakes.FakeAuthManager()
+ fakemgr.add_user(User('id1', 'guy1', 'acc1', 'secret1', False))
+ fakemgr.add_user(User('id2', 'guy2', 'acc2', 'secret2', True))
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(UsersTest, self).tearDown()
+
+ def test_get_user_list(self):
+ req = webob.Request.blank('/v1.0/users')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(len(res_dict['users']), 2)
+
+ def test_get_user_by_id(self):
+ req = webob.Request.blank('/v1.0/users/id2')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['user']['id'], 'id2')
+ self.assertEqual(res_dict['user']['name'], 'guy2')
+ self.assertEqual(res_dict['user']['secret'], 'secret2')
+ self.assertEqual(res_dict['user']['admin'], True)
+ self.assertEqual(res.status_int, 200)
+
+ def test_user_delete(self):
+ # Check the user exists
+ req = webob.Request.blank('/v1.0/users/id1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['user']['id'], 'id1')
+ self.assertEqual(res.status_int, 200)
+
+ # Delete the user
+ req = webob.Request.blank('/v1.0/users/id1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertTrue('id1' not in [u.id for u in
+ fakes.FakeAuthManager.auth_data])
+ self.assertEqual(res.status_int, 200)
+
+ # Check the user is not returned (and returns 404)
+ req = webob.Request.blank('/v1.0/users/id1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 404)
+
+ def test_user_create(self):
+ secret = utils.generate_password()
+ body = dict(user=dict(name='test_guy',
+ access='acc3',
+ secret=secret,
+ admin=True))
+ req = webob.Request.blank('/v1.0/users')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'POST'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+
+ # NOTE(justinsb): This is a questionable assertion in general
+ # fake sets id=name, but others might not...
+ self.assertEqual(res_dict['user']['id'], 'test_guy')
+
+ self.assertEqual(res_dict['user']['name'], 'test_guy')
+ self.assertEqual(res_dict['user']['access'], 'acc3')
+ self.assertEqual(res_dict['user']['secret'], secret)
+ self.assertEqual(res_dict['user']['admin'], True)
+ self.assertTrue('test_guy' in [u.id for u in
+ fakes.FakeAuthManager.auth_data])
+ self.assertEqual(len(fakes.FakeAuthManager.auth_data), 3)
+
+ def test_user_update(self):
+ new_secret = utils.generate_password()
+ body = dict(user=dict(name='guy2',
+ access='acc2',
+ secret=new_secret))
+ req = webob.Request.blank('/v1.0/users/id2')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['user']['id'], 'id2')
+ self.assertEqual(res_dict['user']['name'], 'guy2')
+ self.assertEqual(res_dict['user']['access'], 'acc2')
+ self.assertEqual(res_dict['user']['secret'], new_secret)
+ self.assertEqual(res_dict['user']['admin'], True)
diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py
new file mode 100644
index 000000000..2640a4ddb
--- /dev/null
+++ b/nova/tests/api/openstack/test_versions.py
@@ -0,0 +1,123 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import webob
+
+from nova import context
+from nova import test
+from nova.tests.api.openstack import fakes
+from nova.api.openstack import views
+
+
+class VersionsTest(test.TestCase):
+ def setUp(self):
+ super(VersionsTest, self).setUp()
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ super(VersionsTest, self).tearDown()
+
+ def test_get_version_list(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ versions = json.loads(res.body)["versions"]
+ expected = [
+ {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1",
+ }
+ ],
+ },
+ {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.0",
+ }
+ ],
+ },
+ ]
+ self.assertEqual(versions, expected)
+
+ def test_get_version_list_xml(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/xml")
+
+ expected = """<versions>
+ <version id="v1.1" status="CURRENT">
+ <links>
+ <link href="http://localhost/v1.1" rel="self"/>
+ </links>
+ </version>
+ <version id="v1.0" status="DEPRECATED">
+ <links>
+ <link href="http://localhost/v1.0" rel="self"/>
+ </links>
+ </version>
+ </versions>""".replace(" ", "").replace("\n", "")
+
+ actual = res.body.replace(" ", "").replace("\n", "")
+
+ self.assertEqual(expected, actual)
+
+ def test_view_builder(self):
+ base_url = "http://example.org/"
+
+ version_data = {
+ "id": "3.2.1",
+ "status": "CURRENT",
+ }
+
+ expected = {
+ "id": "3.2.1",
+ "status": "CURRENT",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://example.org/3.2.1",
+ },
+ ],
+ }
+
+ builder = views.versions.ViewBuilder(base_url)
+ output = builder.build(version_data)
+
+ self.assertEqual(output, expected)
+
+ def test_generate_href(self):
+ base_url = "http://example.org/app/"
+ version_number = "v1.4.6"
+
+ expected = "http://example.org/app/v1.4.6"
+
+ builder = views.versions.ViewBuilder(base_url)
+ actual = builder.generate_href(version_number)
+
+ self.assertEqual(actual, expected)
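test_view_builder and test_generate_href assert only that the versions view builder joins the base URL with the version id and wraps the result in a self link. A minimal class consistent with those assertions, for illustration (the real implementation is nova.api.openstack.views.versions.ViewBuilder, whose body is not part of this change):

class VersionsViewBuilderSketch(object):

    def __init__(self, base_url):
        self.base_url = base_url

    def generate_href(self, version_number):
        # "http://example.org/app/" + "v1.4.6" -> "http://example.org/app/v1.4.6"
        return self.base_url + version_number

    def build(self, version_data):
        return {
            'id': version_data['id'],
            'status': version_data['status'],
            'links': [{
                'rel': 'self',
                'href': self.generate_href(version_data['id']),
            }],
        }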
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index 555b206b9..a3f191aaa 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -1,4 +1,4 @@
-# Copyright 2010 OpenStack LLC.
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -24,6 +24,7 @@ from nova import flags
from nova import test
from nova.api.openstack import zones
from nova.tests.api.openstack import fakes
+from nova.scheduler import api
FLAGS = flags.FLAGS
@@ -31,7 +32,7 @@ FLAGS.verbose = True
def zone_get(context, zone_id):
- return dict(id=1, api_url='http://foo.com', username='bob',
+ return dict(id=1, api_url='http://example.com', username='bob',
password='xxx')
@@ -42,7 +43,7 @@ def zone_create(context, values):
def zone_update(context, zone_id, values):
- zone = dict(id=zone_id, api_url='http://foo.com', username='bob',
+ zone = dict(id=zone_id, api_url='http://example.com', username='bob',
password='xxx')
zone.update(values)
return zone
@@ -52,19 +53,37 @@ def zone_delete(context, zone_id):
pass
-def zone_get_all(context):
+def zone_get_all_scheduler(*args):
return [
- dict(id=1, api_url='http://foo.com', username='bob',
+ dict(id=1, api_url='http://example.com', username='bob',
password='xxx'),
- dict(id=2, api_url='http://blah.com', username='alice',
- password='qwerty')]
+ dict(id=2, api_url='http://example.org', username='alice',
+ password='qwerty'),
+ ]
+
+
+def zone_get_all_scheduler_empty(*args):
+ return []
+
+
+def zone_get_all_db(context):
+ return [
+ dict(id=1, api_url='http://example.com', username='bob',
+ password='xxx'),
+ dict(id=2, api_url='http://example.org', username='alice',
+ password='qwerty'),
+ ]
+
+
+def zone_capabilities(method, context, params):
+ return dict()
class ZonesTest(test.TestCase):
def setUp(self):
super(ZonesTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -74,17 +93,22 @@ class ZonesTest(test.TestCase):
FLAGS.allow_admin_api = True
self.stubs.Set(nova.db, 'zone_get', zone_get)
- self.stubs.Set(nova.db, 'zone_get_all', zone_get_all)
self.stubs.Set(nova.db, 'zone_update', zone_update)
self.stubs.Set(nova.db, 'zone_create', zone_create)
self.stubs.Set(nova.db, 'zone_delete', zone_delete)
+ self.old_zone_name = FLAGS.zone_name
+ self.old_zone_capabilities = FLAGS.zone_capabilities
+
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ FLAGS.zone_name = self.old_zone_name
+ FLAGS.zone_capabilities = self.old_zone_capabilities
super(ZonesTest, self).tearDown()
- def test_get_zone_list(self):
+ def test_get_zone_list_scheduler(self):
+ self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler)
req = webob.Request.blank('/v1.0/zones')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@@ -92,47 +116,77 @@ class ZonesTest(test.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['zones']), 2)
+ def test_get_zone_list_db(self):
+ self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler_empty)
+ self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
+ req = webob.Request.blank('/v1.0/zones')
+ req.headers["Content-Type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(len(res_dict['zones']), 2)
+
def test_get_zone_by_id(self):
req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
- self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
+ self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('password' in res_dict['zone'])
- self.assertEqual(res.status_int, 200)
def test_zone_delete(self):
req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
def test_zone_create(self):
- body = dict(zone=dict(api_url='http://blah.zoo', username='fred',
+ body = dict(zone=dict(api_url='http://example.com', username='fred',
password='fubar'))
req = webob.Request.blank('/v1.0/zones')
+ req.headers["Content-Type"] = "application/json"
req.method = 'POST'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
- self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo')
+ self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('username' in res_dict['zone'])
def test_zone_update(self):
body = dict(zone=dict(username='zeb', password='sneaky'))
req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
req.method = 'PUT'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
- self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
+ self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('username' in res_dict['zone'])
+
+ def test_zone_info(self):
+ FLAGS.zone_name = 'darksecret'
+ FLAGS.zone_capabilities = ['cap1=a;b', 'cap2=c;d']
+ self.stubs.Set(api, '_call_scheduler', zone_capabilities)
+
+ req = webob.Request.blank('/v1.0/zones/info')
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['zone']['name'], 'darksecret')
+ self.assertEqual(res_dict['zone']['cap1'], 'a;b')
+ self.assertEqual(res_dict['zone']['cap2'], 'c;d')
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 2c7852214..1ecdd1cfb 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -21,11 +21,13 @@
Test WSGI basics and provide some helper functions for other WSGI tests.
"""
+import json
from nova import test
import routes
import webob
+from nova import exception
from nova import wsgi
@@ -66,63 +68,164 @@ class Test(test.TestCase):
result = webob.Request.blank('/bad').get_response(Router())
self.assertNotEqual(result.body, "Router result")
- def test_controller(self):
- class Controller(wsgi.Controller):
- """Test controller to call from router."""
- test = self
-
- def show(self, req, id): # pylint: disable-msg=W0622,C0103
- """Default action called for requests with an ID."""
- self.test.assertEqual(req.path_info, '/tests/123')
- self.test.assertEqual(id, '123')
- return id
-
- class Router(wsgi.Router):
- """Test router."""
-
- def __init__(self):
- mapper = routes.Mapper()
- mapper.resource("test", "tests", controller=Controller())
- super(Router, self).__init__(mapper)
-
- result = webob.Request.blank('/tests/123').get_response(Router())
- self.assertEqual(result.body, "123")
- result = webob.Request.blank('/test/123').get_response(Router())
- self.assertNotEqual(result.body, "123")
+class ControllerTest(test.TestCase):
+
+ class TestRouter(wsgi.Router):
+
+ class TestController(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "test": ["id"]}}}
+
+ def show(self, req, id): # pylint: disable=W0622,C0103
+ return {"test": {"id": id}}
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.resource("test", "tests", controller=self.TestController())
+ wsgi.Router.__init__(self, mapper)
+
+ def test_show(self):
+ request = wsgi.Request.blank('/tests/123')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
+
+ def test_response_content_type_from_accept_xml(self):
+ request = webob.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+ def test_response_content_type_from_accept_json(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+ def test_response_content_type_from_query_extension_xml(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+ def test_response_content_type_from_query_extension_json(self):
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+ def test_response_content_type_default_when_unsupported(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.status_int, 200)
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+
+class RequestTest(test.TestCase):
+
+ def test_request_content_type_missing(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.body = "<body />"
+ self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+ def test_request_content_type_unsupported(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "text/html"
+ request.body = "asdf<br />"
+ self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+ def test_content_type_from_accept_xml(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml, application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = \
+ "application/json; q=0.3, application/xml; q=0.9"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_from_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123.invalid')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_accept_and_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_accept_default(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
class SerializerTest(test.TestCase):
- def match(self, url, accept, expect):
+ def test_xml(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<servers><a>(2,3)</a></servers>'
+ serializer = wsgi.Serializer()
+ result = serializer.serialize(input_dict, "application/xml")
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_xml)
+
+ def test_json(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
- req = webob.Request.blank(url, headers=dict(Accept=accept))
- result = wsgi.Serializer(req.environ).to_content_type(input_dict)
+ serializer = wsgi.Serializer()
+ result = serializer.serialize(input_dict, "application/json")
result = result.replace('\n', '').replace(' ', '')
- if expect == 'xml':
- self.assertEqual(result, expected_xml)
- elif expect == 'json':
- self.assertEqual(result, expected_json)
- else:
- raise "Bad expect value"
-
- def test_basic(self):
- self.match('/servers/4.json', None, expect='json')
- self.match('/servers/4', 'application/json', expect='json')
- self.match('/servers/4', 'application/xml', expect='xml')
- self.match('/servers/4.xml', None, expect='xml')
-
- def test_defaults_to_json(self):
- self.match('/servers/4', None, expect='json')
- self.match('/servers/4', 'text/html', expect='json')
-
- def test_suffix_takes_precedence_over_accept_header(self):
- self.match('/servers/4.xml', 'application/json', expect='xml')
- self.match('/servers/4.xml.', 'application/json', expect='json')
-
- def test_deserialize(self):
+ self.assertEqual(result, expected_json)
+
+ def test_unsupported_content_type(self):
+ serializer = wsgi.Serializer()
+ self.assertRaises(exception.InvalidContentType, serializer.serialize,
+ {}, "text/null")
+
+ def test_deserialize_json(self):
+ data = """{"a": {
+ "a1": "1",
+ "a2": "2",
+ "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+ "d": {"e": "1"},
+ "f": "1"}}"""
+ as_dict = dict(a={
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+ 'd': {'e': '1'},
+ 'f': '1'})
+ metadata = {}
+ serializer = wsgi.Serializer(metadata)
+ self.assertEqual(serializer.deserialize(data, "application/json"),
+ as_dict)
+
+ def test_deserialize_xml(self):
xml = """
<a a1="1" a2="2">
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
@@ -137,11 +240,13 @@ class SerializerTest(test.TestCase):
'd': {'e': '1'},
'f': '1'})
metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
- serializer = wsgi.Serializer({}, metadata)
- self.assertEqual(serializer.deserialize(xml), as_dict)
+ serializer = wsgi.Serializer(metadata)
+ self.assertEqual(serializer.deserialize(xml, "application/xml"),
+ as_dict)
def test_deserialize_empty_xml(self):
xml = """<a></a>"""
as_dict = {"a": {}}
- serializer = wsgi.Serializer({})
- self.assertEqual(serializer.deserialize(xml), as_dict)
+ serializer = wsgi.Serializer()
+ self.assertEqual(serializer.deserialize(xml, "application/xml"),
+ as_dict)
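To summarize the API shift exercised above (a sketch, not part of the patch itself): the serializer no longer inspects a request environment; callers now pass the target content type explicitly to serialize() and deserialize().

    from nova import wsgi

    serializer = wsgi.Serializer()
    # Round-trip a dict through JSON using the new explicit content-type API.
    body = serializer.serialize({'servers': {'a': [2, 3]}}, 'application/json')
    data = serializer.deserialize(body, 'application/json')
    assert data == {'servers': {'a': [2, 3]}}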
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 05bdd172e..7ddfe377a 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -20,15 +20,75 @@
import time
from nova import db
+from nova import test
from nova import utils
-from nova.compute import instance_types
-def stub_out_db_instance_api(stubs):
- """ Stubs out the db API for creating Instances """
+def stub_out_db_instance_api(stubs, injected=True):
+ """Stubs out the db API for creating Instances."""
+
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512,
+ vcpus=1,
+ local_gb=0,
+ flavorid=1,
+ rxtx_cap=1),
+ 'm1.small': dict(memory_mb=2048,
+ vcpus=1,
+ local_gb=20,
+ flavorid=2,
+ rxtx_cap=2),
+ 'm1.medium':
+ dict(memory_mb=4096,
+ vcpus=2,
+ local_gb=40,
+ flavorid=3,
+ rxtx_cap=3),
+ 'm1.large': dict(memory_mb=8192,
+ vcpus=4,
+ local_gb=80,
+ flavorid=4,
+ rxtx_cap=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384,
+ vcpus=8,
+ local_gb=160,
+ flavorid=5,
+ rxtx_cap=5)}
+
+ flat_network_fields = {'id': 'fake_flat',
+ 'bridge': 'xenbr0',
+ 'label': 'fake_flat_network',
+ 'netmask': '255.255.255.0',
+ 'cidr_v6': 'fe80::a00:0/120',
+ 'netmask_v6': '120',
+ 'gateway': '10.0.0.1',
+ 'gateway_v6': 'fe80::a00:1',
+ 'broadcast': '10.0.0.255',
+ 'dns': '10.0.0.2',
+ 'ra_server': None,
+ 'injected': injected}
+
+ vlan_network_fields = {'id': 'fake_vlan',
+ 'bridge': 'br111',
+ 'label': 'fake_vlan_network',
+ 'netmask': '255.255.255.0',
+ 'cidr_v6': 'fe80::a00:0/120',
+ 'netmask_v6': '120',
+ 'gateway': '10.0.0.1',
+ 'gateway_v6': 'fe80::a00:1',
+ 'broadcast': '10.0.0.255',
+ 'dns': '10.0.0.2',
+ 'ra_server': None,
+ 'vlan': 111,
+ 'injected': False}
+
+ fixed_ip_fields = {'address': '10.0.0.3',
+ 'address_v6': 'fe80::a00:3',
+ 'network_id': 'fake_flat'}
class FakeModel(object):
- """ Stubs out for model """
+ """Stubs out for model."""
def __init__(self, values):
self.values = values
@@ -41,35 +101,46 @@ def stub_out_db_instance_api(stubs):
else:
raise NotImplementedError()
- def fake_instance_create(values):
- """ Stubs out the db.instance_create method """
-
- type_data = instance_types.INSTANCE_TYPES[values['instance_type']]
-
- base_options = {
- 'name': values['name'],
- 'id': values['id'],
- 'reservation_id': utils.generate_uid('r'),
- 'image_id': values['image_id'],
- 'kernel_id': values['kernel_id'],
- 'ramdisk_id': values['ramdisk_id'],
- 'state_description': 'scheduling',
- 'user_id': values['user_id'],
- 'project_id': values['project_id'],
- 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
- 'instance_type': values['instance_type'],
- 'memory_mb': type_data['memory_mb'],
- 'mac_address': values['mac_address'],
- 'vcpus': type_data['vcpus'],
- 'local_gb': type_data['local_gb'],
- }
- return FakeModel(base_options)
+ def fake_instance_type_get_all(context, inactive=0):
+ return INSTANCE_TYPES
+
+ def fake_instance_type_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
def fake_network_get_by_instance(context, instance_id):
- fields = {
- 'bridge': 'xenbr0',
- }
- return FakeModel(fields)
+ # Even instance numbers are on vlan networks
+ if instance_id % 2 == 0:
+ return FakeModel(vlan_network_fields)
+ else:
+ return FakeModel(flat_network_fields)
+
+ def fake_network_get_all_by_instance(context, instance_id):
+ # Even instance numbers are on vlan networks
+ if instance_id % 2 == 0:
+ return [FakeModel(vlan_network_fields)]
+ else:
+ return [FakeModel(flat_network_fields)]
+
+ def fake_instance_get_fixed_address(context, instance_id):
+ return FakeModel(fixed_ip_fields).address
+
+ def fake_instance_get_fixed_address_v6(context, instance_id):
+ return FakeModel(fixed_ip_fields).address_v6
+
+ def fake_fixed_ip_get_all_by_instance(context, instance_id):
+ return [FakeModel(fixed_ip_fields)]
- stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
+ stubs.Set(db, 'network_get_all_by_instance',
+ fake_network_get_all_by_instance)
+ stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
+ stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
+ stubs.Set(db, 'instance_get_fixed_address',
+ fake_instance_get_fixed_address)
+ stubs.Set(db, 'instance_get_fixed_address_v6',
+ fake_instance_get_fixed_address_v6)
+ stubs.Set(db, 'fixed_ip_get_all_by_instance',
+ fake_fixed_ip_get_all_by_instance)
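For reference, a minimal sketch of how a test might wire in these database stubs (the test class name here is hypothetical; stubout and nova.test are the same libraries used elsewhere in this diff):

    import stubout

    from nova import db
    from nova import test
    from nova.tests.db import fakes as db_fakes


    class ExampleDbStubTest(test.TestCase):
        def setUp(self):
            super(ExampleDbStubTest, self).setUp()
            self.stubs = stubout.StubOutForTesting()
            # injected=False marks the fake flat network as not needing file
            # injection; instance type lookups come from INSTANCE_TYPES above.
            db_fakes.stub_out_db_instance_api(self.stubs, injected=False)

        def tearDown(self):
            self.stubs.UnsetAll()
            super(ExampleDbStubTest, self).tearDown()

        def test_flavor_lookup(self):
            flavor = db.instance_type_get_by_name(None, 'm1.small')
            self.assertEqual(flavor['memory_mb'], 2048)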
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index cbd949477..5d7ca98b5 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -32,6 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 8
FLAGS.num_networks = 2
FLAGS.fake_network = True
+FLAGS.image_service = 'nova.image.local.LocalImageService'
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py
new file mode 100644
index 000000000..be59970c9
--- /dev/null
+++ b/nova/tests/fake_utils.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This modules stubs out functions in nova.utils."""
+
+import re
+import types
+
+from eventlet import greenthread
+
+from nova import exception
+from nova import log as logging
+from nova import utils
+
+LOG = logging.getLogger('nova.tests.fake_utils')
+
+_fake_execute_repliers = []
+_fake_execute_log = []
+
+
+def fake_execute_get_log():
+ return _fake_execute_log
+
+
+def fake_execute_clear_log():
+ global _fake_execute_log
+ _fake_execute_log = []
+
+
+def fake_execute_set_repliers(repliers):
+ """Allows the client to configure replies to commands."""
+ global _fake_execute_repliers
+ _fake_execute_repliers = repliers
+
+
+def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
+ """A reply handler for commands that haven't been added to the reply list.
+
+ Returns empty strings for stdout and stderr.
+
+ """
+ return '', ''
+
+
+def fake_execute(*cmd_parts, **kwargs):
+ """This function stubs out execute.
+
+ It optionally executes a preconfigured function to return expected data.
+
+ """
+ global _fake_execute_repliers
+
+ process_input = kwargs.get('process_input', None)
+ addl_env = kwargs.get('addl_env', None)
+ check_exit_code = kwargs.get('check_exit_code', 0)
+ cmd_str = ' '.join(str(part) for part in cmd_parts)
+
+ LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str)
+ _fake_execute_log.append(cmd_str)
+
+ reply_handler = fake_execute_default_reply_handler
+
+ for fake_replier in _fake_execute_repliers:
+ if re.match(fake_replier[0], cmd_str):
+ reply_handler = fake_replier[1]
+ LOG.debug(_('Faked command matched %s') % fake_replier[0])
+ break
+
+ if isinstance(reply_handler, basestring):
+ # If the reply handler is a string, return it as stdout
+ reply = reply_handler, ''
+ else:
+ try:
+ # Alternative is a function, so call it
+ reply = reply_handler(cmd_parts,
+ process_input=process_input,
+ addl_env=addl_env,
+ check_exit_code=check_exit_code)
+ except exception.ProcessExecutionError as e:
+ LOG.debug(_('Faked command raised an exception %s') % str(e))
+ raise
+
+ stdout = reply[0]
+ stderr = reply[1]
+ LOG.debug(_("Reply to faked command is stdout='%(stdout)s' "
+ "stderr='%(stderr)s'") % locals())
+
+ # Replicate the sleep call in the real function
+ greenthread.sleep(0)
+ return reply
+
+
+def stub_out_utils_execute(stubs):
+ fake_execute_set_repliers([])
+ fake_execute_clear_log()
+ stubs.Set(utils, 'execute', fake_execute)
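As a rough sketch of the intended usage (the regex and reply string are illustrative, not taken from any real test in this change):

    import stubout

    from nova import utils
    from nova.tests import fake_utils

    stubs = stubout.StubOutForTesting()
    fake_utils.stub_out_utils_execute(stubs)

    # A matching command returns the configured string as stdout; anything
    # else falls through to the default handler, which returns ('', '').
    fake_utils.fake_execute_set_repliers([
        ('^ls .*', 'fake_file\n'),
    ])

    out, err = utils.execute('ls', '/tmp')
    assert out == 'fake_file\n'
    assert fake_utils.fake_execute_get_log() == ['ls /tmp']

    stubs.UnsetAll()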
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index f182b857a..5872552ec 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -26,12 +26,45 @@ def stubout_glance_client(stubs, cls):
class FakeGlance(object):
+ IMAGE_MACHINE = 1
+ IMAGE_KERNEL = 2
+ IMAGE_RAMDISK = 3
+ IMAGE_RAW = 4
+ IMAGE_VHD = 5
+
+ IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'disk_format': 'ami',
+ 'container_format': 'ami'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'disk_format': 'aki',
+ 'container_format': 'aki'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'disk_format': 'ari',
+ 'container_format': 'ari'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'disk_format': 'raw',
+ 'container_format': 'bare'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'disk_format': 'vhd',
+ 'container_format': 'ovf'},
+ 'image_data': StringIO.StringIO('')}}
+
def __init__(self, host, port=None, use_ssl=False):
pass
- def get_image(self, image):
- meta = {
- 'size': 0,
- }
- image_file = StringIO.StringIO('')
- return meta, image_file
+ def get_image_meta(self, image_id):
+ return self.IMAGE_FIXTURES[image_id]['image_meta']
+
+ def get_image(self, image_id):
+ image = self.IMAGE_FIXTURES[image_id]
+ return image['image_meta'], image['image_data']
diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py
index 3980ae3cb..042819b9c 100644
--- a/nova/tests/hyperv_unittest.py
+++ b/nova/tests/hyperv_unittest.py
@@ -51,7 +51,7 @@ class HyperVTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, instance)
conn = hyperv.get_connection(False)
- conn._create_vm(instance_ref) # pylint: disable-msg=W0212
+ conn._create_vm(instance_ref) # pylint: disable=W0212
found = [n for n in conn.list_instances()
if n == instance_ref['name']]
self.assertTrue(len(found) == 1)
diff --git a/nova/tests/image/__init__.py b/nova/tests/image/__init__.py
new file mode 100644
index 000000000..b94e2e54e
--- /dev/null
+++ b/nova/tests/image/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
new file mode 100644
index 000000000..9d0b14613
--- /dev/null
+++ b/nova/tests/image/test_glance.py
@@ -0,0 +1,236 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import datetime
+import unittest
+
+from nova import context
+from nova import test
+from nova.image import glance
+
+
+class StubGlanceClient(object):
+
+ def __init__(self, images, add_response=None, update_response=None):
+ self.images = images
+ self.add_response = add_response
+ self.update_response = update_response
+
+ def get_image_meta(self, image_id):
+ return self.images[image_id]
+
+ def get_images_detailed(self):
+ return self.images.itervalues()
+
+ def get_image(self, image_id):
+ return self.images[image_id], []
+
+ def add_image(self, metadata, data):
+ return self.add_response
+
+ def update_image(self, image_id, metadata, data):
+ return self.update_response
+
+
+class NullWriter(object):
+ """Used to test ImageService.get which takes a writer object"""
+
+ def write(self, *arg, **kwargs):
+ pass
+
+
+class BaseGlanceTest(unittest.TestCase):
+ NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
+ NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
+ NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22)
+
+ def setUp(self):
+ # FIXME(sirp): we can probably use stubs library here rather than
+ # dependency injection
+ self.client = StubGlanceClient(None)
+ self.service = glance.GlanceImageService(self.client)
+ self.context = context.RequestContext(None, None)
+
+ def assertDateTimesFilled(self, image_meta):
+ self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
+ self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
+ self.assertEqual(image_meta['deleted_at'], self.NOW_DATETIME)
+
+ def assertDateTimesEmpty(self, image_meta):
+ self.assertEqual(image_meta['updated_at'], None)
+ self.assertEqual(image_meta['deleted_at'], None)
+
+ def assertDateTimesBlank(self, image_meta):
+ self.assertEqual(image_meta['updated_at'], '')
+ self.assertEqual(image_meta['deleted_at'], '')
+
+
+class TestGlanceImageServiceProperties(BaseGlanceTest):
+ def test_show_passes_through_to_client(self):
+ """Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the
+ properties dict
+ """
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'foo': 'bar',
+ 'properties': {'prop1': 'propvalue1'}}}
+ self.client.images = fixtures
+ image_meta = self.service.show(self.context, 'image1')
+
+ expected = {'name': 'image1', 'is_public': True,
+ 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}
+ self.assertEqual(image_meta, expected)
+
+ def test_detail_passes_through_to_client(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'foo': 'bar',
+ 'properties': {'prop1': 'propvalue1'}}}
+ self.client.images = fixtures
+ image_meta = self.service.detail(self.context)
+ expected = [{'name': 'image1', 'is_public': True,
+ 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}]
+ self.assertEqual(image_meta, expected)
+
+
+class TestGetterDateTimeNoneTests(BaseGlanceTest):
+
+ def test_show_handles_none_datetimes(self):
+ self.client.images = self._make_none_datetime_fixtures()
+ image_meta = self.service.show(self.context, 'image1')
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_show_handles_blank_datetimes(self):
+ self.client.images = self._make_blank_datetime_fixtures()
+ image_meta = self.service.show(self.context, 'image1')
+ self.assertDateTimesBlank(image_meta)
+
+ def test_detail_handles_none_datetimes(self):
+ self.client.images = self._make_none_datetime_fixtures()
+ image_meta = self.service.detail(self.context)[0]
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_detail_handles_blank_datetimes(self):
+ self.client.images = self._make_blank_datetime_fixtures()
+ image_meta = self.service.detail(self.context)[0]
+ self.assertDateTimesBlank(image_meta)
+
+ def test_get_handles_none_datetimes(self):
+ self.client.images = self._make_none_datetime_fixtures()
+ writer = NullWriter()
+ image_meta = self.service.get(self.context, 'image1', writer)
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_get_handles_blank_datetimes(self):
+ self.client.images = self._make_blank_datetime_fixtures()
+ writer = NullWriter()
+ image_meta = self.service.get(self.context, 'image1', writer)
+ self.assertDateTimesBlank(image_meta)
+
+ def test_show_makes_datetimes(self):
+ self.client.images = self._make_datetime_fixtures()
+ image_meta = self.service.show(self.context, 'image1')
+ self.assertDateTimesFilled(image_meta)
+ image_meta = self.service.show(self.context, 'image2')
+ self.assertDateTimesFilled(image_meta)
+
+ def test_detail_makes_datetimes(self):
+ self.client.images = self._make_datetime_fixtures()
+ image_meta = self.service.detail(self.context)[0]
+ self.assertDateTimesFilled(image_meta)
+ image_meta = self.service.detail(self.context)[1]
+ self.assertDateTimesFilled(image_meta)
+
+ def test_get_makes_datetimes(self):
+ self.client.images = self._make_datetime_fixtures()
+ writer = NullWriter()
+ image_meta = self.service.get(self.context, 'image1', writer)
+ self.assertDateTimesFilled(image_meta)
+ image_meta = self.service.get(self.context, 'image2', writer)
+ self.assertDateTimesFilled(image_meta)
+
+ def _make_datetime_fixtures(self):
+ fixtures = {
+ 'image1': {
+ 'name': 'image1',
+ 'is_public': True,
+ 'created_at': self.NOW_GLANCE_FORMAT,
+ 'updated_at': self.NOW_GLANCE_FORMAT,
+ 'deleted_at': self.NOW_GLANCE_FORMAT,
+ },
+ 'image2': {
+ 'name': 'image2',
+ 'is_public': True,
+ 'created_at': self.NOW_GLANCE_OLD_FORMAT,
+ 'updated_at': self.NOW_GLANCE_OLD_FORMAT,
+ 'deleted_at': self.NOW_GLANCE_OLD_FORMAT,
+ },
+ }
+ return fixtures
+
+ def _make_none_datetime_fixtures(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'updated_at': None,
+ 'deleted_at': None}}
+ return fixtures
+
+ def _make_blank_datetime_fixtures(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'updated_at': '',
+ 'deleted_at': ''}}
+ return fixtures
+
+
+class TestMutatorDateTimeTests(BaseGlanceTest):
+ """Tests create(), update()"""
+
+ def test_create_handles_datetimes(self):
+ self.client.add_response = self._make_datetime_fixture()
+ image_meta = self.service.create(self.context, {})
+ self.assertDateTimesFilled(image_meta)
+
+ def test_create_handles_none_datetimes(self):
+ self.client.add_response = self._make_none_datetime_fixture()
+ dummy_meta = {}
+ image_meta = self.service.create(self.context, dummy_meta)
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_update_handles_datetimes(self):
+ self.client.update_response = self._make_datetime_fixture()
+ dummy_id = 'dummy_id'
+ dummy_meta = {}
+ image_meta = self.service.update(self.context, dummy_id, dummy_meta)
+ self.assertDateTimesFilled(image_meta)
+
+ def test_update_handles_none_datetimes(self):
+ self.client.update_response = self._make_none_datetime_fixture()
+ dummy_id = 'dummy_id'
+ dummy_meta = {}
+ image_meta = self.service.update(self.context, dummy_id, dummy_meta)
+ self.assertDateTimesEmpty(image_meta)
+
+ def _make_datetime_fixture(self):
+ fixture = {'id': 'image1', 'name': 'image1', 'is_public': True,
+ 'created_at': self.NOW_GLANCE_FORMAT,
+ 'updated_at': self.NOW_GLANCE_FORMAT,
+ 'deleted_at': self.NOW_GLANCE_FORMAT}
+ return fixture
+
+ def _make_none_datetime_fixture(self):
+ fixture = {'id': 'image1', 'name': 'image1', 'is_public': True,
+ 'updated_at': None,
+ 'deleted_at': None}
+ return fixture
diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py
new file mode 100644
index 000000000..10e0a91d7
--- /dev/null
+++ b/nova/tests/integrated/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`integrated` -- Tests whole systems, using mock services where needed
+==========================================================================
+"""
diff --git a/nova/tests/integrated/api/__init__.py b/nova/tests/integrated/api/__init__.py
new file mode 100644
index 000000000..5798ab3d1
--- /dev/null
+++ b/nova/tests/integrated/api/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`api` -- OpenStack API client, for testing rather than production
+======================================================================
+"""
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
new file mode 100644
index 000000000..7e20c9b00
--- /dev/null
+++ b/nova/tests/integrated/api/client.py
@@ -0,0 +1,244 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import httplib
+import urlparse
+
+from nova import log as logging
+
+
+LOG = logging.getLogger('nova.tests.api')
+
+
+class OpenStackApiException(Exception):
+ def __init__(self, message=None, response=None):
+ self.response = response
+ if not message:
+ message = 'Unspecified error'
+
+ if response:
+ _status = response.status
+ _body = response.read()
+
+ message = _('%(message)s\nStatus Code: %(_status)s\n'
+ 'Body: %(_body)s') % locals()
+
+ super(OpenStackApiException, self).__init__(message)
+
+
+class OpenStackApiAuthenticationException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Authentication error")
+ super(OpenStackApiAuthenticationException, self).__init__(message,
+ response)
+
+
+class OpenStackApiNotFoundException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Item not found")
+ super(OpenStackApiNotFoundException, self).__init__(message, response)
+
+
+class TestOpenStackClient(object):
+ """Simple OpenStack API Client.
+
+ This is a really basic OpenStack API client that is under our control,
+ so we can make changes / insert hooks for testing
+
+ """
+
+ def __init__(self, auth_user, auth_key, auth_uri):
+ super(TestOpenStackClient, self).__init__()
+ self.auth_result = None
+ self.auth_user = auth_user
+ self.auth_key = auth_key
+ self.auth_uri = auth_uri
+
+ def request(self, url, method='GET', body=None, headers=None):
+ if headers is None:
+ headers = {}
+
+ parsed_url = urlparse.urlparse(url)
+ port = parsed_url.port
+ hostname = parsed_url.hostname
+ scheme = parsed_url.scheme
+
+ if scheme == 'http':
+ conn = httplib.HTTPConnection(hostname,
+ port=port)
+ elif scheme == 'https':
+ conn = httplib.HTTPSConnection(hostname,
+ port=port)
+ else:
+ raise OpenStackApiException("Unknown scheme: %s" % url)
+
+ relative_url = parsed_url.path
+ if parsed_url.query:
+ relative_url = relative_url + "?" + parsed_url.query
+ LOG.info(_("Doing %(method)s on %(relative_url)s") % locals())
+ if body:
+ LOG.info(_("Body: %s") % body)
+ headers.setdefault('Content-Type', 'application/json')
+
+ conn.request(method, relative_url, body, headers)
+ response = conn.getresponse()
+ return response
+
+ def _authenticate(self):
+ if self.auth_result:
+ return self.auth_result
+
+ auth_uri = self.auth_uri
+ headers = {'X-Auth-User': self.auth_user,
+ 'X-Auth-Key': self.auth_key}
+ response = self.request(auth_uri,
+ headers=headers)
+
+ http_status = response.status
+ LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals())
+
+ if http_status == 401:
+ raise OpenStackApiAuthenticationException(response=response)
+
+ auth_headers = {}
+ for k, v in response.getheaders():
+ auth_headers[k] = v
+
+ self.auth_result = auth_headers
+ return self.auth_result
+
+ def api_request(self, relative_uri, check_response_status=None, **kwargs):
+ auth_result = self._authenticate()
+
+ # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
+ base_uri = auth_result['x-server-management-url']
+ full_uri = base_uri + relative_uri
+
+ headers = kwargs.setdefault('headers', {})
+ headers['X-Auth-Token'] = auth_result['x-auth-token']
+
+ response = self.request(full_uri, **kwargs)
+
+ http_status = response.status
+ LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals())
+
+ if check_response_status:
+ if not http_status in check_response_status:
+ if http_status == 404:
+ raise OpenStackApiNotFoundException(response=response)
+ else:
+ raise OpenStackApiException(
+ message=_("Unexpected status code"),
+ response=response)
+
+ return response
+
+ def _decode_json(self, response):
+ body = response.read()
+ LOG.debug(_("Decoding JSON: %s") % (body))
+ return json.loads(body)
+
+ def api_get(self, relative_uri, **kwargs):
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_post(self, relative_uri, body, **kwargs):
+ kwargs['method'] = 'POST'
+ if body:
+ headers = kwargs.setdefault('headers', {})
+ headers['Content-Type'] = 'application/json'
+ kwargs['body'] = json.dumps(body)
+
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_delete(self, relative_uri, **kwargs):
+ kwargs['method'] = 'DELETE'
+ kwargs.setdefault('check_response_status', [200, 202])
+ return self.api_request(relative_uri, **kwargs)
+
+ def get_server(self, server_id):
+ return self.api_get('/servers/%s' % server_id)['server']
+
+ def get_servers(self, detail=True):
+ rel_url = '/servers/detail' if detail else '/servers'
+ return self.api_get(rel_url)['servers']
+
+ def post_server(self, server):
+ return self.api_post('/servers', server)['server']
+
+ def delete_server(self, server_id):
+ return self.api_delete('/servers/%s' % server_id)
+
+ def get_image(self, image_id):
+ return self.api_get('/images/%s' % image_id)['image']
+
+ def get_images(self, detail=True):
+ rel_url = '/images/detail' if detail else '/images'
+ return self.api_get(rel_url)['images']
+
+ def post_image(self, image):
+ return self.api_post('/images', image)['image']
+
+ def delete_image(self, image_id):
+ return self.api_delete('/images/%s' % image_id)
+
+ def get_flavor(self, flavor_id):
+ return self.api_get('/flavors/%s' % flavor_id)['flavor']
+
+ def get_flavors(self, detail=True):
+ rel_url = '/flavors/detail' if detail else '/flavors'
+ return self.api_get(rel_url)['flavors']
+
+ def post_flavor(self, flavor):
+ return self.api_post('/flavors', flavor)['flavor']
+
+ def delete_flavor(self, flavor_id):
+ return self.api_delete('/flavors/%s' % flavor_id)
+
+ def get_volume(self, volume_id):
+ return self.api_get('/volumes/%s' % volume_id)['volume']
+
+ def get_volumes(self, detail=True):
+ rel_url = '/volumes/detail' if detail else '/volumes'
+ return self.api_get(rel_url)['volumes']
+
+ def post_volume(self, volume):
+ return self.api_post('/volumes', volume)['volume']
+
+ def delete_volume(self, volume_id):
+ return self.api_delete('/volumes/%s' % volume_id)
+
+ def get_server_volume(self, server_id, attachment_id):
+ return self.api_get('/servers/%s/volume_attachments/%s' %
+ (server_id, attachment_id))['volumeAttachment']
+
+ def get_server_volumes(self, server_id):
+ return self.api_get('/servers/%s/volume_attachments' %
+ (server_id))['volumeAttachments']
+
+ def post_server_volume(self, server_id, volume_attachment):
+ return self.api_post('/servers/%s/volume_attachments' %
+ (server_id), volume_attachment)['volumeAttachment']
+
+ def delete_server_volume(self, server_id, attachment_id):
+ return self.api_delete('/servers/%s/volume_attachments/%s' %
+ (server_id, attachment_id))
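A brief, hedged sketch of how this client is driven (credentials and names are illustrative; the integrated tests below create a throwaway user and pass its real auth URL):

    from nova.tests.integrated.api import client

    api = client.TestOpenStackClient('unittest_user', 'unittest_key',
                                     'http://localhost:8774/v1.1')

    # get_flavors() authenticates lazily on first use via _authenticate().
    flavors = api.get_flavors()

    post = {'server': {'name': 'example-server',
                       'imageRef': 'http://fake.server/1',
                       'flavorRef': 'http://fake.server/%s' % flavors[0]['id']}}
    created = api.post_server(post)
    api.delete_server(created['id'])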
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
new file mode 100644
index 000000000..2e5d67017
--- /dev/null
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -0,0 +1,221 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides common functionality for integrated unit tests
+"""
+
+import random
+import string
+
+from nova import exception
+from nova import flags
+from nova import service
+from nova import test # For the flags
+from nova.auth import manager
+from nova.log import logging
+from nova.tests.integrated.api import client
+
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+def generate_random_alphanumeric(length):
+ """Creates a random alphanumeric string of specified length."""
+ return ''.join(random.choice(string.ascii_uppercase + string.digits)
+ for _x in range(length))
+
+
+def generate_random_numeric(length):
+ """Creates a random numeric string of specified length."""
+ return ''.join(random.choice(string.digits)
+ for _x in range(length))
+
+
+def generate_new_element(items, prefix, numeric=False):
+ """Creates a random string with prefix, that is not in 'items' list."""
+ while True:
+ if numeric:
+ candidate = prefix + generate_random_numeric(8)
+ else:
+ candidate = prefix + generate_random_alphanumeric(8)
+ if not candidate in items:
+ return candidate
+ LOG.debug("Random collision on %s" % candidate)
+
+
+class TestUser(object):
+ def __init__(self, name, secret, auth_url):
+ self.name = name
+ self.secret = secret
+ self.auth_url = auth_url
+
+ if not auth_url:
+ raise exception.Error("auth_url is required")
+ self.openstack_api = client.TestOpenStackClient(self.name,
+ self.secret,
+ self.auth_url)
+
+ def get_unused_server_name(self):
+ servers = self.openstack_api.get_servers()
+ server_names = [server['name'] for server in servers]
+ return generate_new_element(server_names, 'server')
+
+ def get_invalid_image(self):
+ images = self.openstack_api.get_images()
+ image_ids = [image['id'] for image in images]
+ return generate_new_element(image_ids, '', numeric=True)
+
+ def get_valid_image(self, create=False):
+ images = self.openstack_api.get_images()
+ if create and not images:
+ # TODO(justinsb): No way currently to create an image through API
+ #created_image = self.openstack_api.post_image(image)
+ #images.append(created_image)
+ raise exception.Error("No way to create an image through API")
+
+ if images:
+ return images[0]
+ return None
+
+
+class IntegratedUnitTestContext(object):
+ def __init__(self, auth_url):
+ self.auth_manager = manager.AuthManager()
+
+ self.auth_url = auth_url
+ self.project_name = None
+
+ self.test_user = None
+
+ self.setup()
+
+ def setup(self):
+ self._create_test_user()
+
+ def _create_test_user(self):
+ self.test_user = self._create_unittest_user()
+
+ # No way to currently pass this through the OpenStack API
+ self.project_name = 'openstack'
+ self._configure_project(self.project_name, self.test_user)
+
+ def cleanup(self):
+ self.test_user = None
+
+ def _create_unittest_user(self):
+ users = self.auth_manager.get_users()
+ user_names = [user.name for user in users]
+ auth_name = generate_new_element(user_names, 'unittest_user_')
+ auth_key = generate_random_alphanumeric(16)
+
+ # Right now there's a bug where auth_name and auth_key are reversed
+ # bug732907
+ auth_key = auth_name
+
+ self.auth_manager.create_user(auth_name, auth_name, auth_key, False)
+ return TestUser(auth_name, auth_key, self.auth_url)
+
+ def _configure_project(self, project_name, user):
+ projects = self.auth_manager.get_projects()
+ project_names = [project.name for project in projects]
+ if not project_name in project_names:
+ project = self.auth_manager.create_project(project_name,
+ user.name,
+ description=None,
+ member_users=None)
+ else:
+ self.auth_manager.add_to_project(user.name, project_name)
+
+
+class _IntegratedTestBase(test.TestCase):
+ def setUp(self):
+ super(_IntegratedTestBase, self).setUp()
+
+ f = self._get_flags()
+ self.flags(**f)
+
+ # set up services
+ self.start_service('compute')
+ self.start_service('volume')
+ # NOTE(justinsb): There's a bug here which is eluding me...
+ # If we start the network_service, all is good, but then subsequent
+ # tests fail: CloudTestCase.test_ajax_console in particular.
+ #self.start_service('network')
+ self.start_service('scheduler')
+
+ self.auth_url = self._start_api_service()
+
+ self.context = IntegratedUnitTestContext(self.auth_url)
+
+ self.user = self.context.test_user
+ self.api = self.user.openstack_api
+
+ def _start_api_service(self):
+ api_service = service.ApiService.create()
+ if not api_service:
+ raise Exception("API Service was None")
+
+ api_service.start()
+
+ auth_url = 'http://localhost:8774/v1.1'
+ return auth_url
+
+ def tearDown(self):
+ self.context.cleanup()
+ super(_IntegratedTestBase, self).tearDown()
+
+ def _get_flags(self):
+ """An opportunity to setup flags, before the services are started."""
+ f = {}
+ f['image_service'] = 'nova.image.fake.FakeImageService'
+ f['fake_network'] = True
+ return f
+
+ def _build_minimal_create_server_request(self):
+ server = {}
+
+ image = self.user.get_valid_image(create=True)
+ LOG.debug("Image: %s" % image)
+
+ if 'imageRef' in image:
+ image_ref = image['imageRef']
+ else:
+ # NOTE(justinsb): The imageRef code hasn't yet landed
+ LOG.warning("imageRef not yet in images output")
+ image_ref = image['id']
+
+ # TODO(justinsb): This is FUBAR
+ image_ref = abs(hash(image_ref))
+
+ image_ref = 'http://fake.server/%s' % image_ref
+
+ # We now have a valid imageId
+ server['imageRef'] = image_ref
+
+ # Set a valid flavorId
+ flavor = self.api.get_flavors()[0]
+ LOG.debug("Using flavor: %s" % flavor)
+ server['flavorRef'] = 'http://fake.server/%s' % flavor['id']
+
+ # Set a valid server name
+ server_name = self.user.get_unused_server_name()
+ server['name'] = server_name
+
+ return server
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
new file mode 100644
index 000000000..0d4ee8cab
--- /dev/null
+++ b/nova/tests/integrated/test_extensions.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class ExtensionsTest(integrated_helpers._IntegratedTestBase):
+ def _get_flags(self):
+ f = super(ExtensionsTest, self)._get_flags()
+ f['osapi_extensions_path'] = os.path.join(os.path.dirname(__file__),
+ "../api/openstack/extensions")
+ return f
+
+ def test_get_foxnsocks(self):
+ """Simple check that fox-n-socks works."""
+ response = self.api.api_request('/foxnsocks')
+ foxnsocks = response.read()
+ LOG.debug("foxnsocks: %s" % foxnsocks)
+ self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
new file mode 100644
index 000000000..a5180b6bc
--- /dev/null
+++ b/nova/tests/integrated/test_login.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+from nova.tests.integrated.api import client
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class LoginTest(integrated_helpers._IntegratedTestBase):
+ def test_login(self):
+ """Simple check - we list flavors - so we know we're logged in."""
+ flavors = self.api.get_flavors()
+ for flavor in flavors:
+ LOG.debug(_("flavor: %s") % flavor)
+
+ def test_bad_login_password(self):
+ """Test that I get a 401 with a bad username."""
+ bad_credentials_api = client.TestOpenStackClient(self.user.name,
+ "notso_password",
+ self.user.auth_url)
+
+ self.assertRaises(client.OpenStackApiAuthenticationException,
+ bad_credentials_api.get_flavors)
+
+ def test_bad_login_username(self):
+ """Test that I get a 401 with a bad password."""
+ bad_credentials_api = client.TestOpenStackClient("notso_username",
+ self.user.secret,
+ self.user.auth_url)
+
+ self.assertRaises(client.OpenStackApiAuthenticationException,
+ bad_credentials_api.get_flavors)
+
+ def test_bad_login_both_bad(self):
+ """Test that I get a 401 with both bad username and bad password."""
+ bad_credentials_api = client.TestOpenStackClient("notso_username",
+ "notso_password",
+ self.user.auth_url)
+
+ self.assertRaises(client.OpenStackApiAuthenticationException,
+ bad_credentials_api.get_flavors)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
new file mode 100644
index 000000000..749ea8955
--- /dev/null
+++ b/nova/tests/integrated/test_servers.py
@@ -0,0 +1,184 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+import unittest
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+from nova.tests.integrated.api import client
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class ServersTest(integrated_helpers._IntegratedTestBase):
+ def test_get_servers(self):
+ """Simple check that listing servers works."""
+ servers = self.api.get_servers()
+ for server in servers:
+ LOG.debug("server: %s" % server)
+
+ def test_create_and_delete_server(self):
+ """Creates and deletes a server."""
+
+ # Create server
+
+ # Build the server data gradually, checking errors along the way
+ server = {}
+ good_server = self._build_minimal_create_server_request()
+
+ post = {'server': server}
+
+ # Without an imageRef, this throws 500.
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # With an invalid imageRef, this throws 500.
+ server['imageRef'] = self.user.get_invalid_image()
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Add a valid imageId/imageRef
+ server['imageId'] = good_server.get('imageId')
+ server['imageRef'] = good_server.get('imageRef')
+
+ # Without flavorId, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Set a valid flavorId/flavorRef
+ server['flavorRef'] = good_server.get('flavorRef')
+ server['flavorId'] = good_server.get('flavorId')
+
+ # Without a name, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Set a valid server name
+ server['name'] = good_server['name']
+
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ # It should also be in the all-servers list
+ servers = self.api.get_servers()
+ server_ids = [server['id'] for server in servers]
+ self.assertTrue(created_server_id in server_ids)
+
+ # Wait (briefly) for creation
+ retries = 0
+ while found_server['status'] == 'build':
+ LOG.debug("found server: %s" % found_server)
+ time.sleep(1)
+ found_server = self.api.get_server(created_server_id)
+ retries = retries + 1
+ if retries > 5:
+ break
+
+ # It should be available...
+ # TODO(justinsb): Mock doesn't yet do this...
+ #self.assertEqual('available', found_server['status'])
+
+ self._delete_server(created_server_id)
+
+ def _delete_server(self, server_id):
+ # Delete the server
+ self.api.delete_server(server_id)
+
+ # Wait (briefly) for deletion
+ for _retries in range(5):
+ try:
+ found_server = self.api.get_server(server_id)
+ except client.OpenStackApiNotFoundException:
+ found_server = None
+ LOG.debug("Got 404, proceeding")
+ break
+
+ LOG.debug("Found_server=%s" % found_server)
+
+ # TODO(justinsb): Mock doesn't yet do accurate state changes
+ #if found_server['status'] != 'deleting':
+ # break
+ time.sleep(1)
+
+ # Should be gone
+ self.assertFalse(found_server)
+
+# TODO(justinsb): Enable this unit test when the metadata bug is fixed
+# def test_create_server_with_metadata(self):
+# """Creates a server with metadata"""
+#
+# # Build the server data gradually, checking errors along the way
+# server = self._build_minimal_create_server_request()
+#
+# for metadata_count in range(30):
+# metadata = {}
+# for i in range(metadata_count):
+# metadata['key_%s' % i] = 'value_%s' % i
+# server['metadata'] = metadata
+#
+# post = {'server': server}
+# created_server = self.api.post_server(post)
+# LOG.debug("created_server: %s" % created_server)
+# self.assertTrue(created_server['id'])
+# created_server_id = created_server['id']
+# # Reenable when bug fixed
+# # self.assertEqual(metadata, created_server.get('metadata'))
+#
+# # Check it's there
+# found_server = self.api.get_server(created_server_id)
+# self.assertEqual(created_server_id, found_server['id'])
+# self.assertEqual(metadata, found_server.get('metadata'))
+#
+# # The server should also be in the all-servers details list
+# servers = self.api.get_servers(detail=True)
+# server_map = dict((server['id'], server) for server in servers)
+# found_server = server_map.get(created_server_id)
+# self.assertTrue(found_server)
+# # Details do include metadata
+# self.assertEqual(metadata, found_server.get('metadata'))
+#
+# # The server should also be in the all-servers summary list
+# servers = self.api.get_servers(detail=False)
+# server_map = dict((server['id'], server) for server in servers)
+# found_server = server_map.get(created_server_id)
+# self.assertTrue(found_server)
+# # Summary should not include metadata
+# self.assertFalse(found_server.get('metadata'))
+#
+# # Cleanup
+# self._delete_server(created_server_id)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py
new file mode 100644
index 000000000..e9fb3c4d1
--- /dev/null
+++ b/nova/tests/integrated/test_volumes.py
@@ -0,0 +1,295 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+import time
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+from nova.tests.integrated.api import client
+from nova.volume import driver
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class VolumesTest(integrated_helpers._IntegratedTestBase):
+ def setUp(self):
+ super(VolumesTest, self).setUp()
+ driver.LoggingVolumeDriver.clear_logs()
+
+ def _get_flags(self):
+ f = super(VolumesTest, self)._get_flags()
+ f['use_local_volumes'] = False # Avoids calling local_path
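+        # The logging driver records every driver call so the tests below
+        # can assert on create/export/delete actions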
+ f['volume_driver'] = 'nova.volume.driver.LoggingVolumeDriver'
+ return f
+
+ def test_get_volumes_summary(self):
+ """Simple check that listing volumes works."""
+ volumes = self.api.get_volumes(False)
+ for volume in volumes:
+ LOG.debug("volume: %s" % volume)
+
+ def test_get_volumes(self):
+ """Simple check that listing volumes works."""
+ volumes = self.api.get_volumes()
+ for volume in volumes:
+ LOG.debug("volume: %s" % volume)
+
+ def _poll_while(self, volume_id, continue_states, max_retries=5):
+ """Poll (briefly) while the state is in continue_states."""
+ retries = 0
+ while True:
+ try:
+ found_volume = self.api.get_volume(volume_id)
+ except client.OpenStackApiNotFoundException:
+ found_volume = None
+ LOG.debug("Got 404, proceeding")
+ break
+
+ LOG.debug("Found %s" % found_volume)
+
+ self.assertEqual(volume_id, found_volume['id'])
+
+ if not found_volume['status'] in continue_states:
+ break
+
+ time.sleep(1)
+ retries = retries + 1
+ if retries > max_retries:
+ break
+ return found_volume
+
+ def test_create_and_delete_volume(self):
+ """Creates and deletes a volume."""
+
+ # Create volume
+ created_volume = self.api.post_volume({'volume': {'size': 1}})
+ LOG.debug("created_volume: %s" % created_volume)
+ self.assertTrue(created_volume['id'])
+ created_volume_id = created_volume['id']
+
+ # Check it's there
+ found_volume = self.api.get_volume(created_volume_id)
+ self.assertEqual(created_volume_id, found_volume['id'])
+
+ # It should also be in the all-volume list
+ volumes = self.api.get_volumes()
+ volume_names = [volume['id'] for volume in volumes]
+ self.assertTrue(created_volume_id in volume_names)
+
+ # Wait (briefly) for creation. Delay is due to the 'message queue'
+ found_volume = self._poll_while(created_volume_id, ['creating'])
+
+ # It should be available...
+ self.assertEqual('available', found_volume['status'])
+
+ # Delete the volume
+ self.api.delete_volume(created_volume_id)
+
+ # Wait (briefly) for deletion. Delay is due to the 'message queue'
+ found_volume = self._poll_while(created_volume_id, ['deleting'])
+
+ # Should be gone
+ self.assertFalse(found_volume)
+
+ LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs())
+
+ create_actions = driver.LoggingVolumeDriver.logs_like(
+ 'create_volume',
+ id=created_volume_id)
+ LOG.debug("Create_Actions: %s" % create_actions)
+
+ self.assertEquals(1, len(create_actions))
+ create_action = create_actions[0]
+ self.assertEquals(create_action['id'], created_volume_id)
+ self.assertEquals(create_action['availability_zone'], 'nova')
+ self.assertEquals(create_action['size'], 1)
+
+ export_actions = driver.LoggingVolumeDriver.logs_like(
+ 'create_export',
+ id=created_volume_id)
+ self.assertEquals(1, len(export_actions))
+ export_action = export_actions[0]
+ self.assertEquals(export_action['id'], created_volume_id)
+ self.assertEquals(export_action['availability_zone'], 'nova')
+
+ delete_actions = driver.LoggingVolumeDriver.logs_like(
+ 'delete_volume',
+ id=created_volume_id)
+ self.assertEquals(1, len(delete_actions))
+        delete_action = delete_actions[0]
+ self.assertEquals(delete_action['id'], created_volume_id)
+
+ def test_attach_and_detach_volume(self):
+ """Creates, attaches, detaches and deletes a volume."""
+
+ # Create server
+ server_req = {'server': self._build_minimal_create_server_request()}
+ # NOTE(justinsb): Create an extra server so that server_id != volume_id
+ self.api.post_server(server_req)
+ created_server = self.api.post_server(server_req)
+ LOG.debug("created_server: %s" % created_server)
+ server_id = created_server['id']
+
+ # Create volume
+ created_volume = self.api.post_volume({'volume': {'size': 1}})
+ LOG.debug("created_volume: %s" % created_volume)
+ volume_id = created_volume['id']
+ self._poll_while(volume_id, ['creating'])
+
+ # Check we've got different IDs
+ self.assertNotEqual(server_id, volume_id)
+
+ # List current server attachments - should be none
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([], attachments)
+
+ # Template attach request
+ device = '/dev/sdc'
+ attach_req = {'device': device}
+ post_req = {'volumeAttachment': attach_req}
+
+ # Try to attach to a non-existent volume; should fail
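+        # (3405691582 is 0xCAFEBABE in decimal, presumably chosen as an id
+        # that should never exist)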
+ attach_req['volumeId'] = 3405691582
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.post_server_volume, server_id, post_req)
+
+ # Try to attach to a non-existent server; should fail
+ attach_req['volumeId'] = volume_id
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.post_server_volume, 3405691582, post_req)
+
+ # Should still be no attachments...
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([], attachments)
+
+ # Do a real attach
+ attach_req['volumeId'] = volume_id
+ attach_result = self.api.post_server_volume(server_id, post_req)
+ LOG.debug(_("Attachment = %s") % attach_result)
+
+ attachment_id = attach_result['id']
+ self.assertEquals(volume_id, attach_result['volumeId'])
+
+ # These fields aren't set because it's async
+ #self.assertEquals(server_id, attach_result['serverId'])
+ #self.assertEquals(device, attach_result['device'])
+
+ # This is just an implementation detail, but let's check it...
+ self.assertEquals(volume_id, attachment_id)
+
+ # NOTE(justinsb): There's an issue with the attach code, in that
+ # it's currently asynchronous and not recorded until the attach
+ # completes. So the caller must be 'smart', like this...
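+        # (This is the same bounded retry/poll pattern as _poll_while above,
+        # keyed on the attachment rather than the volume.)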
+ attach_done = None
+ retries = 0
+ while True:
+ try:
+ attach_done = self.api.get_server_volume(server_id,
+ attachment_id)
+ break
+ except client.OpenStackApiNotFoundException:
+ LOG.debug("Got 404, waiting")
+
+ time.sleep(1)
+ retries = retries + 1
+ if retries > 10:
+ break
+
+ expect_attach = {}
+ expect_attach['id'] = volume_id
+ expect_attach['volumeId'] = volume_id
+ expect_attach['serverId'] = server_id
+ expect_attach['device'] = device
+
+ self.assertEqual(expect_attach, attach_done)
+
+        # Should be one attachment
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([expect_attach], attachments)
+
+ # Should be able to get details
+ attachment_info = self.api.get_server_volume(server_id, attachment_id)
+ self.assertEquals(expect_attach, attachment_info)
+
+ # Getting details on a different id should fail
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.get_server_volume, server_id, 3405691582)
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.get_server_volume,
+ 3405691582, attachment_id)
+
+ # Trying to detach a different id should fail
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.delete_server_volume, server_id, 3405691582)
+
+ # Detach should work
+ self.api.delete_server_volume(server_id, attachment_id)
+
+ # Again, it's async, so wait...
+ retries = 0
+ while True:
+ try:
+ attachment = self.api.get_server_volume(server_id,
+ attachment_id)
+ LOG.debug("Attachment still there: %s" % attachment)
+ except client.OpenStackApiNotFoundException:
+ LOG.debug("Got 404, delete done")
+ break
+
+ time.sleep(1)
+ retries = retries + 1
+ self.assertTrue(retries < 10)
+
+ # Should be no attachments again
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([], attachments)
+
+ LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs())
+
+ # Discover_volume and undiscover_volume are called from compute
+ # on attach/detach
+
+ disco_moves = driver.LoggingVolumeDriver.logs_like(
+ 'discover_volume',
+ id=volume_id)
+ LOG.debug("discover_volume actions: %s" % disco_moves)
+
+ self.assertEquals(1, len(disco_moves))
+ disco_move = disco_moves[0]
+ self.assertEquals(disco_move['id'], volume_id)
+
+ last_days_of_disco_moves = driver.LoggingVolumeDriver.logs_like(
+ 'undiscover_volume',
+ id=volume_id)
+ LOG.debug("undiscover_volume actions: %s" % last_days_of_disco_moves)
+
+ self.assertEquals(1, len(last_days_of_disco_moves))
+ undisco_move = last_days_of_disco_moves[0]
+ self.assertEquals(undisco_move['id'], volume_id)
+ self.assertEquals(undisco_move['mountpoint'], device)
+ self.assertEquals(undisco_move['instance_id'], server_id)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/nova/tests/network/__init__.py b/nova/tests/network/__init__.py
new file mode 100644
index 000000000..97f96b6fa
--- /dev/null
+++ b/nova/tests/network/__init__.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Utility methods for network tests
+"""
+import os
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import utils
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+def binpath(script):
+ """Returns the absolute path to a script in bin"""
+ return os.path.abspath(os.path.join(__file__, "../../../../bin", script))
+
+
+def lease_ip(private_ip):
+ """Run add command on dhcpbridge"""
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(),
+ private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
+ private_ip)
+ cmd = (binpath('nova-dhcpbridge'), 'add',
+ instance_ref['mac_address'],
+ private_ip, 'fake')
+ env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
+ 'TESTING': '1',
+ 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
+ (out, err) = utils.execute(*cmd, addl_env=env)
+ LOG.debug("ISSUE_IP: %s, %s ", out, err)
+
+
+def release_ip(private_ip):
+ """Run del command on dhcpbridge"""
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(),
+ private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
+ private_ip)
+ cmd = (binpath('nova-dhcpbridge'), 'del',
+ instance_ref['mac_address'],
+ private_ip, 'fake')
+ env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
+ 'TESTING': '1',
+ 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
+ (out, err) = utils.execute(*cmd, addl_env=env)
+ LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py
new file mode 100644
index 000000000..988a1de72
--- /dev/null
+++ b/nova/tests/network/base.py
@@ -0,0 +1,154 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Base class of Unit Tests for all network models
+"""
+import IPy
+import os
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.auth import manager
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+class NetworkTestCase(test.TestCase):
+ """Test cases for network code"""
+ def setUp(self):
+ super(NetworkTestCase, self).setUp()
+ # NOTE(vish): if you change these flags, make sure to change the
+ # flags in the corresponding section in nova-dhcpbridge
+ self.flags(connection_type='fake',
+ fake_call=True,
+ fake_network=True)
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
+ self.projects = []
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.context = context.RequestContext(project=None, user=self.user)
+ for i in range(FLAGS.num_networks):
+ name = 'project%s' % i
+ project = self.manager.create_project(name, 'netuser', name)
+ self.projects.append(project)
+ # create the necessary network data for the project
+ user_context = context.RequestContext(project=self.projects[i],
+ user=self.user)
+ host = self.network.get_network_host(user_context.elevated())
+ instance_ref = self._create_instance(0)
+ self.instance_id = instance_ref['id']
+ instance_ref = self._create_instance(1)
+ self.instance2_id = instance_ref['id']
+
+ def tearDown(self):
+        # TODO(termie): this should really be instantiating clean datastores
+        #               in between runs; one failure kills all the tests
+ db.instance_destroy(context.get_admin_context(), self.instance_id)
+ db.instance_destroy(context.get_admin_context(), self.instance2_id)
+ for project in self.projects:
+ self.manager.delete_project(project)
+ self.manager.delete_user(self.user)
+ super(NetworkTestCase, self).tearDown()
+
+ def _create_instance(self, project_num, mac=None):
+ if not mac:
+ mac = utils.generate_mac()
+ project = self.projects[project_num]
+ self.context._project = project
+ self.context.project_id = project.id
+ return db.instance_create(self.context,
+ {'project_id': project.id,
+ 'mac_address': mac})
+
+ def _create_address(self, project_num, instance_id=None):
+ """Create an address in given project num"""
+ if instance_id is None:
+ instance_id = self.instance_id
+ self.context._project = self.projects[project_num]
+ self.context.project_id = self.projects[project_num].id
+ return self.network.allocate_fixed_ip(self.context, instance_id)
+
+ def _deallocate_address(self, project_num, address):
+ self.context._project = self.projects[project_num]
+ self.context.project_id = self.projects[project_num].id
+ self.network.deallocate_fixed_ip(self.context, address)
+
+ def _is_allocated_in_project(self, address, project_id):
+ """Returns true if address is in specified project"""
+ project_net = db.network_get_by_bridge(context.get_admin_context(),
+ FLAGS.flat_network_bridge)
+ network = db.fixed_ip_get_network(context.get_admin_context(),
+ address)
+ instance = db.fixed_ip_get_instance(context.get_admin_context(),
+ address)
+ # instance exists until release
+ return instance is not None and network['id'] == project_net['id']
+
+ def test_private_ipv6(self):
+ """Make sure ipv6 is OK"""
+ if FLAGS.use_ipv6:
+ instance_ref = self._create_instance(0)
+ address = self._create_address(0, instance_ref['id'])
+ network_ref = db.project_get_network(
+ context.get_admin_context(),
+ self.context.project_id)
+ address_v6 = db.instance_get_fixed_address_v6(
+ context.get_admin_context(),
+ instance_ref['id'])
+ self.assertEqual(instance_ref['mac_address'],
+ utils.to_mac(address_v6))
+ instance_ref2 = db.fixed_ip_get_instance_v6(
+ context.get_admin_context(),
+ address_v6)
+ self.assertEqual(instance_ref['id'], instance_ref2['id'])
+ self.assertEqual(address_v6,
+ utils.to_global_ipv6(
+ network_ref['cidr_v6'],
+ instance_ref['mac_address']))
+ self._deallocate_address(0, address)
+ db.instance_destroy(context.get_admin_context(),
+ instance_ref['id'])
+
+ def test_available_ips(self):
+ """Make sure the number of available ips for the network is correct
+
+ The number of available IP addresses depends on the test
+ environment's setup.
+
+        Network size is set in the test fixture's setUp method.
+
+        IPs are reserved at the bottom and top of the range for
+        services (network, gateway, CloudPipe, broadcast).
+ """
+ network = db.project_get_network(context.get_admin_context(),
+ self.projects[0].id)
+ net_size = flags.FLAGS.network_size
+ admin_context = context.get_admin_context()
+ total_ips = (db.network_count_available_ips(admin_context,
+ network['id']) +
+ db.network_count_reserved_ips(admin_context,
+ network['id']) +
+ db.network_count_allocated_ips(admin_context,
+ network['id']))
+ self.assertEqual(total_ips, net_size)
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
deleted file mode 100644
index 5a1be08eb..000000000
--- a/nova/tests/objectstore_unittest.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unittets for S3 objectstore clone.
-"""
-
-import boto
-import glob
-import hashlib
-import os
-import shutil
-import tempfile
-
-from boto.s3.connection import S3Connection, OrdinaryCallingFormat
-from twisted.internet import reactor, threads, defer
-from twisted.web import http, server
-
-from nova import context
-from nova import flags
-from nova import objectstore
-from nova import test
-from nova.auth import manager
-from nova.exception import NotEmpty, NotFound
-from nova.objectstore import image
-from nova.objectstore.handler import S3
-
-
-FLAGS = flags.FLAGS
-
-# Create a unique temporary directory. We don't delete after test to
-# allow checking the contents after running tests. Users and/or tools
-# running the tests need to remove the tests directories.
-OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
-
-# Create bucket/images path
-os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
-os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
-
-
-class ObjectStoreTestCase(test.TestCase):
- """Test objectstore API directly."""
-
- def setUp(self):
- """Setup users and projects."""
- super(ObjectStoreTestCase, self).setUp()
- self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
- images_path=os.path.join(OSS_TEMPDIR, 'images'),
- ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
-
- self.auth_manager = manager.AuthManager()
- self.auth_manager.create_user('user1')
- self.auth_manager.create_user('user2')
- self.auth_manager.create_user('admin_user', admin=True)
- self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1'])
- self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])
- self.context = context.RequestContext('user1', 'proj1')
-
- def tearDown(self):
- """Tear down users and projects."""
- self.auth_manager.delete_project('proj1')
- self.auth_manager.delete_project('proj2')
- self.auth_manager.delete_user('user1')
- self.auth_manager.delete_user('user2')
- self.auth_manager.delete_user('admin_user')
- super(ObjectStoreTestCase, self).tearDown()
-
- def test_buckets(self):
- """Test the bucket API."""
- objectstore.bucket.Bucket.create('new_bucket', self.context)
- bucket = objectstore.bucket.Bucket('new_bucket')
-
- # creator is authorized to use bucket
- self.assert_(bucket.is_authorized(self.context))
-
- # another user is not authorized
- context2 = context.RequestContext('user2', 'proj2')
- self.assertFalse(bucket.is_authorized(context2))
-
- # admin is authorized to use bucket
- admin_context = context.RequestContext('admin_user', None)
- self.assertTrue(bucket.is_authorized(admin_context))
-
- # new buckets are empty
- self.assertTrue(bucket.list_keys()['Contents'] == [])
-
- # storing keys works
- bucket['foo'] = "bar"
-
- self.assertEquals(len(bucket.list_keys()['Contents']), 1)
-
- self.assertEquals(bucket['foo'].read(), 'bar')
-
- # md5 of key works
- self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
-
- # deleting non-empty bucket should throw a NotEmpty exception
- self.assertRaises(NotEmpty, bucket.delete)
-
- # deleting key
- del bucket['foo']
-
- # deleting empty bucket
- bucket.delete()
-
- # accessing deleted bucket throws exception
- self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
-
- def test_images(self):
- self.do_test_images('1mb.manifest.xml', True,
- 'image_bucket1', 'i-testing1')
-
- def test_images_no_kernel_or_ramdisk(self):
- self.do_test_images('1mb.no_kernel_or_ramdisk.manifest.xml',
- False, 'image_bucket2', 'i-testing2')
-
- def do_test_images(self, manifest_file, expect_kernel_and_ramdisk,
- image_bucket, image_name):
- "Test the image API."
-
- # create a bucket for our bundle
- objectstore.bucket.Bucket.create(image_bucket, self.context)
- bucket = objectstore.bucket.Bucket(image_bucket)
-
- # upload an image manifest/parts
- bundle_path = os.path.join(os.path.dirname(__file__), 'bundle')
- for path in glob.glob(bundle_path + '/*'):
- bucket[os.path.basename(path)] = open(path, 'rb').read()
-
- # register an image
- image.Image.register_aws_image(image_name,
- '%s/%s' % (image_bucket, manifest_file),
- self.context)
-
- # verify image
- my_img = image.Image(image_name)
- result_image_file = os.path.join(my_img.path, 'image')
- self.assertEqual(os.stat(result_image_file).st_size, 1048576)
-
- sha = hashlib.sha1(open(result_image_file).read()).hexdigest()
- self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
-
- if expect_kernel_and_ramdisk:
- # Verify the default kernel and ramdisk are set
- self.assertEqual(my_img.metadata['kernelId'], 'aki-test')
- self.assertEqual(my_img.metadata['ramdiskId'], 'ari-test')
- else:
- # Verify that the default kernel and ramdisk (the one from FLAGS)
- # doesn't get embedded in the metadata
- self.assertFalse('kernelId' in my_img.metadata)
- self.assertFalse('ramdiskId' in my_img.metadata)
-
- # verify image permissions
- context2 = context.RequestContext('user2', 'proj2')
- self.assertFalse(my_img.is_authorized(context2))
-
- # change user-editable fields
- my_img.update_user_editable_fields({'display_name': 'my cool image'})
- self.assertEqual('my cool image', my_img.metadata['displayName'])
- my_img.update_user_editable_fields({'display_name': ''})
- self.assert_(not my_img.metadata['displayName'])
-
-
-class TestHTTPChannel(http.HTTPChannel):
- """Dummy site required for twisted.web"""
-
- def checkPersistence(self, _, __): # pylint: disable-msg=C0103
- """Otherwise we end up with an unclean reactor."""
- return False
-
-
-class TestSite(server.Site):
- """Dummy site required for twisted.web"""
- protocol = TestHTTPChannel
-
-
-class S3APITestCase(test.TestCase):
- """Test objectstore through S3 API."""
-
- def setUp(self):
- """Setup users, projects, and start a test server."""
- super(S3APITestCase, self).setUp()
-
- FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
- FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
-
- self.auth_manager = manager.AuthManager()
- self.admin_user = self.auth_manager.create_user('admin', admin=True)
- self.admin_project = self.auth_manager.create_project('admin',
- self.admin_user)
-
- shutil.rmtree(FLAGS.buckets_path)
- os.mkdir(FLAGS.buckets_path)
-
- root = S3()
- self.site = TestSite(root)
- # pylint: disable-msg=E1101
- self.listening_port = reactor.listenTCP(0, self.site,
- interface='127.0.0.1')
- # pylint: enable-msg=E1101
- self.tcp_port = self.listening_port.getHost().port
-
- if not boto.config.has_section('Boto'):
- boto.config.add_section('Boto')
- boto.config.set('Boto', 'num_retries', '0')
- self.conn = S3Connection(aws_access_key_id=self.admin_user.access,
- aws_secret_access_key=self.admin_user.secret,
- host='127.0.0.1',
- port=self.tcp_port,
- is_secure=False,
- calling_format=OrdinaryCallingFormat())
-
- def get_http_connection(host, is_secure):
- """Get a new S3 connection, don't attempt to reuse connections."""
- return self.conn.new_http_connection(host, is_secure)
-
- self.conn.get_http_connection = get_http_connection
-
- def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
- self.assertEquals(len(buckets), 0, "Bucket list was not empty")
- return True
-
- def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
- self.assertEquals(len(buckets), 1,
- "Bucket list didn't have exactly one element in it")
- self.assertEquals(buckets[0].name, name, "Wrong name")
- return True
-
- def test_000_list_buckets(self):
- """Make sure we are starting with no buckets."""
- deferred = threads.deferToThread(self.conn.get_all_buckets)
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def test_001_create_and_delete_bucket(self):
- """Test bucket creation and deletion."""
- bucket_name = 'testbucket'
-
- deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_all_buckets))
-
- deferred.addCallback(self._ensure_one_bucket, bucket_name)
-
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.delete_bucket,
- bucket_name))
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_all_buckets))
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def test_002_create_bucket_and_key_and_delete_key_again(self):
- """Test key operations on buckets."""
- bucket_name = 'testbucket'
- key_name = 'somekey'
- key_contents = 'somekey'
-
- deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
- deferred.addCallback(lambda b:
- threads.deferToThread(b.new_key, key_name))
- deferred.addCallback(lambda k:
- threads.deferToThread(k.set_contents_from_string,
- key_contents))
-
- def ensure_key_contents(bucket_name, key_name, contents):
- """Verify contents for a key in the given bucket."""
- bucket = self.conn.get_bucket(bucket_name)
- key = bucket.get_key(key_name)
- self.assertEquals(key.get_contents_as_string(), contents,
- "Bad contents")
-
- deferred.addCallback(lambda _:
- threads.deferToThread(ensure_key_contents,
- bucket_name, key_name,
- key_contents))
-
- def delete_key(bucket_name, key_name):
- """Delete a key for the given bucket."""
- bucket = self.conn.get_bucket(bucket_name)
- key = bucket.get_key(key_name)
- key.delete()
-
- deferred.addCallback(lambda _:
- threads.deferToThread(delete_key, bucket_name,
- key_name))
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_bucket,
- bucket_name))
- deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def tearDown(self):
- """Tear down auth and test server."""
- self.auth_manager.delete_user('admin')
- self.auth_manager.delete_project('admin')
- stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
- super(S3APITestCase, self).tearDown()
- return defer.DeferredList([stop_listening])
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index d5c54a1c3..fa0e56597 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -20,6 +20,7 @@
import boto
from boto.ec2 import regioninfo
+from boto.exception import EC2ResponseError
import datetime
import httplib
import random
@@ -124,7 +125,7 @@ class ApiEc2TestCase(test.TestCase):
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
- # pylint: disable-msg=E1103
+ # pylint: disable=E1103
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
@@ -177,6 +178,17 @@ class ApiEc2TestCase(test.TestCase):
self.manager.delete_project(project)
self.manager.delete_user(user)
+ def test_terminate_invalid_instance(self):
+ """Attempt to terminate an invalid instance"""
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+ self.assertRaises(EC2ResponseError, self.ec2.terminate_instances,
+ "i-00000005")
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, that the API call to list key pairs works properly"""
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 2a7817032..f8a1b1564 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -80,10 +80,10 @@ class user_and_project_generator(object):
self.manager.delete_project(self.project)
-class AuthManagerTestCase(object):
+class _AuthManagerBaseTestCase(test.TestCase):
def setUp(self):
FLAGS.auth_driver = self.auth_driver
- super(AuthManagerTestCase, self).setUp()
+ super(_AuthManagerBaseTestCase, self).setUp()
self.flags(connection_type='fake')
self.manager = manager.AuthManager(new=True)
@@ -299,6 +299,13 @@ class AuthManagerTestCase(object):
self.assertEqual('test2', project.project_manager_id)
self.assertEqual('new desc', project.description)
+ def test_modify_project_adds_new_manager(self):
+ with user_and_project_generator(self.manager):
+ with user_generator(self.manager, name='test2'):
+ self.manager.modify_project('testproj', 'test2', 'new desc')
+ project = self.manager.get_project('testproj')
+ self.assertTrue('test2' in project.member_ids)
+
def test_can_delete_project(self):
with user_generator(self.manager):
self.manager.create_project('testproj', 'test1')
@@ -324,11 +331,11 @@ class AuthManagerTestCase(object):
self.assertTrue(user.is_admin())
-class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
+class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
-class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase):
+class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 061910013..5cb969979 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -35,29 +35,23 @@ from nova import log as logging
from nova import rpc
from nova import service
from nova import test
+from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.api.ec2 import cloud
-from nova.objectstore import image
+from nova.api.ec2 import ec2utils
+from nova.image import local
+from nova.exception import NotFound
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.cloud')
-# Temp dirs for working with image attributes through the cloud controller
-# (stole this from objectstore_unittest.py)
-OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
-IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images')
-os.makedirs(IMAGES_PATH)
-
-# TODO(termie): these tests are rather fragile, they should at the lest be
-# wiping database state after each run
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
- self.flags(connection_type='fake',
- images_path=IMAGES_PATH)
+ self.flags(connection_type='fake')
self.conn = rpc.Connection.instance()
@@ -68,6 +62,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
+ self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
@@ -76,6 +71,13 @@ class CloudTestCase(test.TestCase):
project=self.project)
host = self.network.get_network_host(self.context.elevated())
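+        # Stub the local image service so image lookups return a minimal
+        # machine image without touching the real image store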
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine'}}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show)
+ self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+
def tearDown(self):
network_ref = db.project_get_network(self.context,
self.project.id)
@@ -122,7 +124,7 @@ class CloudTestCase(test.TestCase):
self.cloud.allocate_address(self.context)
inst = db.instance_create(self.context, {'host': self.compute.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
- ec2_id = cloud.id_to_ec2_id(inst['id'])
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -158,12 +160,12 @@ class CloudTestCase(test.TestCase):
vol2 = db.volume_create(self.context, {})
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
- volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
+ volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
result = self.cloud.describe_volumes(self.context,
volume_id=[volume_id])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(
- cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
+ ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
@@ -188,8 +190,10 @@ class CloudTestCase(test.TestCase):
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_id': 1,
'host': 'host1'})
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_id': 1,
'host': 'host2'})
comp1 = db.service_create(self.context, {'host': 'host1',
'availability_zone': 'zone1',
@@ -200,7 +204,7 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
- instance_id = cloud.id_to_ec2_id(inst2['id'])
+ instance_id = ec2utils.id_to_ec2_id(inst2['id'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
@@ -214,11 +218,39 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
+ def test_describe_images(self):
+ describe_images = self.cloud.describe_images
+
+ def fake_detail(meh, context):
+ return [{'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine'}}]
+
+ def fake_show_none(meh, context, id):
+ raise NotFound
+
+ self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
+ # list all
+ result1 = describe_images(self.context)
+ result1 = result1['imagesSet'][0]
+ self.assertEqual(result1['imageId'], 'ami-00000001')
+ # provided a valid image_id
+ result2 = describe_images(self.context, ['ami-00000001'])
+ self.assertEqual(1, len(result2['imagesSet']))
+ # provide more than 1 valid image_id
+ result3 = describe_images(self.context, ['ami-00000001',
+ 'ami-00000002'])
+ self.assertEqual(2, len(result3['imagesSet']))
+        # provide a non-existent image_id
+ self.stubs.UnsetAll()
+ self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
+ self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
+ self.assertRaises(NotFound, describe_images,
+ self.context, ['ami-fake'])
+
def test_console_output(self):
- image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
- kwargs = {'image_id': image_id,
+ kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
@@ -234,8 +266,7 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
def test_ajax_console(self):
- image_id = FLAGS.default_image
- kwargs = {'image_id': image_id}
+ kwargs = {'image_id': 'ami-1'}
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
greenthread.sleep(0.3)
@@ -267,7 +298,7 @@ class CloudTestCase(test.TestCase):
self._create_key('test1')
self._create_key('test2')
result = self.cloud.describe_key_pairs(self.context)
- keys = result["keypairsSet"]
+ keys = result["keySet"]
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
@@ -310,44 +341,9 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
- @staticmethod
- def _fake_set_image_description(ctxt, image_id, description):
- from nova.objectstore import handler
-
- class req:
- pass
-
- request = req()
- request.context = ctxt
- request.args = {'image_id': [image_id],
- 'description': [description]}
-
- resource = handler.ImagesResource()
- resource.render_POST(request)
-
- def test_user_editable_image_endpoint(self):
- pathdir = os.path.join(FLAGS.images_path, 'ami-testing')
- os.mkdir(pathdir)
- info = {'isPublic': False}
- with open(os.path.join(pathdir, 'info.json'), 'w') as f:
- json.dump(info, f)
- img = image.Image('ami-testing')
- # self.cloud.set_image_description(self.context, 'ami-testing',
- # 'Foo Img')
- # NOTE(vish): Above won't work unless we start objectstore or create
- # a fake version of api/ec2/images.py conn that can
- # call methods directly instead of going through boto.
- # for now, just cheat and call the method directly
- self._fake_set_image_description(self.context, 'ami-testing',
- 'Foo Img')
- self.assertEqual('Foo Img', img.metadata['description'])
- self._fake_set_image_description(self.context, 'ami-testing', '')
- self.assertEqual('', img.metadata['description'])
- shutil.rmtree(pathdir)
-
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
- ec2_id = cloud.id_to_ec2_id(inst['id'])
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])
@@ -365,7 +361,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_display_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
- cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+ ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
display_name='c00l v0lum3')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual('c00l v0lum3', vol['display_name'])
@@ -374,7 +370,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_wont_update_private_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
- cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+ ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
mountpoint='/not/here')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index b049ac943..1b0f426d2 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,6 +20,7 @@ Tests For Compute
"""
import datetime
+import mox
from nova import compute
from nova import context
@@ -27,14 +28,28 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
-
+from nova.compute import instance_types
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
+from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
+
+
+class FakeTime(object):
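+    """Stand-in for the time module that records sleeps instead of sleeping."""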
+ def __init__(self):
+ self.counter = 0
+
+ def sleep(self, t):
+ self.counter += t
class ComputeTestCase(test.TestCase):
@@ -51,15 +66,20 @@ class ComputeTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show)
+
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(ComputeTestCase, self).tearDown()
- def _create_instance(self):
+ def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
@@ -67,8 +87,24 @@ class ComputeTestCase(test.TestCase):
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
+ inst.update(params)
return db.instance_create(self.context, inst)['id']
+ def _create_instance_type(self, params={}):
+ """Create a test instance"""
+ context = self.context.elevated()
+ inst = {}
+ inst['name'] = 'm1.small'
+ inst['memory_mb'] = '1024'
+ inst['vcpus'] = '1'
+ inst['local_gb'] = '20'
+ inst['flavorid'] = '1'
+ inst['swap'] = '2048'
+ inst['rxtx_quota'] = 100
+ inst['rxtx_cap'] = 200
+ inst.update(params)
+ return db.instance_type_create(context, inst)['id']
+
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
@@ -76,6 +112,21 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project.id}
return db.security_group_create(self.context, values)
+ def _get_dummy_instance(self):
+ """Get mock-return-value instance object
+ Use this when any testcase executed later than test_run_terminate
+ """
+ vol1 = models.Volume()
+ vol1['id'] = 1
+ vol2 = models.Volume()
+ vol2['id'] = 2
+ instance_ref = models.Instance()
+ instance_ref['id'] = 1
+ instance_ref['volumes'] = [vol1, vol2]
+ instance_ref['hostname'] = 'i-00000001'
+ instance_ref['host'] = 'dummy'
+ return instance_ref
+
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -235,6 +286,16 @@ class ComputeTestCase(test.TestCase):
console = self.compute.get_ajax_console(self.context,
instance_id)
+ self.assert_(set(['token', 'host', 'port']).issubset(console.keys()))
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_vnc_console(self):
+ """Make sure we can a vnc console for an instance."""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ console = self.compute.get_vnc_console(self.context,
+ instance_id)
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
@@ -266,3 +327,341 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_resize_instance(self):
+ """Ensure instance can be migrated/resized"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+
+ self.compute.run_instance(self.context, instance_id)
+ db.instance_update(self.context, instance_id, {'host': 'foo'})
+ self.compute.prep_resize(context, instance_id, 1)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ instance_id, 'pre-migrating')
+ self.compute.resize_instance(context, instance_id,
+ migration_ref['id'])
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_invalid_flavor_fails(self):
+ """Ensure invalid flavors raise"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+ self.compute.run_instance(self.context, instance_id)
+
+ self.assertRaises(exception.NotFound, self.compute_api.resize,
+ context, instance_id, 200)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_down_fails(self):
+ """Ensure resizing down raises and fails"""
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+
+ self.compute.run_instance(self.context, instance_id)
+ db.instance_update(self.context, instance_id,
+ {'instance_type': 'm1.xlarge'})
+
+ self.assertRaises(exception.ApiError, self.compute_api.resize,
+ context, instance_id, 1)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_same_size_fails(self):
+ """Ensure invalid flavors raise"""
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+
+ self.compute.run_instance(self.context, instance_id)
+
+ self.assertRaises(exception.ApiError, self.compute_api.resize,
+ context, instance_id, 1)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_get_by_flavor_id(self):
+ type = instance_types.get_by_flavor_id(1)
+ self.assertEqual(type, 'm1.tiny')
+
+ def test_resize_same_source_fails(self):
+ """Ensure instance fails to migrate when source and destination are
+ the same host"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.assertRaises(exception.Error, self.compute.prep_resize,
+ self.context, instance_id, 1)
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def _setup_other_managers(self):
+ self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_driver = utils.import_object(FLAGS.compute_driver)
+
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ """Confirm raising exception if instance doesn't have fixed_ip."""
+ instance_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NotFound,
+ self.compute.pre_live_migration,
+ c, instance_ref['id'], time=FakeTime())
+
+ def test_pre_live_migration_instance_has_volume(self):
+ """Confirm setup_compute_volume is called when volume is mounted."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ volmock = self.mox.CreateMock(self.volume_manager)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ vid = i_ref['volumes'][i]['id']
+ volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.volume_manager = volmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
+ self.assertEqual(ret, None)
+
+ def test_pre_live_migration_instance_has_no_volume(self):
+ """Confirm log meg when instance doesn't mount any volumes."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+ compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'], time=FakeTime())
+ self.assertEqual(ret, None)
+
+ def test_pre_live_migration_setup_compute_node_fail(self):
+ """Confirm operation setup_compute_network() fails.
+
+ It retries and raise exception when timeout exceeded.
+
+ """
+
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ volmock = self.mox.CreateMock(self.volume_manager)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
+ for i in range(FLAGS.live_migration_retry_count):
+ netmock.setup_compute_network(c, i_ref['id']).\
+ AndRaise(exception.ProcessExecutionError())
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.volume_manager = volmock
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.compute.pre_live_migration,
+ c, i_ref['id'], time=FakeTime())
+
+ def test_live_migration_works_correctly_with_volume(self):
+ """Confirm check_for_export to confirm volume health check."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_live_migration_dest_raises_exception(self):
+ """Confirm exception when pre_live_migration fails."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_dest_raises_exception_no_volume(self):
+ """Same as above test(input pattern is different) """
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_works_correctly_no_volume(self):
+ """Confirm live_migration() works as expected correctly."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_post_live_migration_working_correctly(self):
+ """Confirm post_live_migration() works as expected correctly."""
+ dest = 'desthost'
+ flo_addr = '1.2.1.2'
+
+        # Preparing data
+ c = context.get_admin_context()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(c, instance_id)
+ db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+ v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
+ fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
+ 'instance_id': instance_id})
+ fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
+ flo_ref = db.floating_ip_create(c, {'address': flo_addr,
+ 'fixed_ip_id': fix_ref['id']})
+ # reload is necessary before setting mocks
+ i_ref = db.instance_get(c, instance_id)
+
+ # Preparing mocks
+ self.mox.StubOutWithMock(self.compute.volume_manager,
+ 'remove_compute_volume')
+ for v in i_ref['volumes']:
+ self.compute.volume_manager.remove_compute_volume(c, v['id'])
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(i_ref)
+
+ # executing
+ self.mox.ReplayAll()
+ ret = self.compute.post_live_migration(c, i_ref, dest)
+
+        # make sure all data has been rewritten to dest
+ i_ref = db.instance_get(c, i_ref['id'])
+ c1 = (i_ref['host'] == dest)
+ flo_refs = db.floating_ip_get_all_by_host(c, dest)
+ c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)
+
+        # post operation
+ self.assertTrue(c1 and c2)
+ db.instance_destroy(c, instance_id)
+ db.volume_destroy(c, v_ref['id'])
+ db.floating_ip_destroy(c, flo_addr)
+
+ def test_run_kill_vm(self):
+ """Detect when a vm is terminated behind the scenes"""
+ instance_id = self._create_instance()
+
+ self.compute.run_instance(self.context, instance_id)
+
+ instances = db.instance_get_all(context.get_admin_context())
+ LOG.info(_("Running instances: %s"), instances)
+ self.assertEqual(len(instances), 1)
+
+ instance_name = instances[0].name
+ self.compute.driver.test_remove_vm(instance_name)
+
+ # Force the compute manager to do its periodic poll
+ error_list = self.compute.periodic_tasks(context.get_admin_context())
+ self.assertFalse(error_list)
+
+ instances = db.instance_get_all(context.get_admin_context())
+ LOG.info(_("After force-killing instances: %s"), instances)
+ self.assertEqual(len(instances), 0)
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
index 49ff24413..d47c70d88 100644
--- a/nova/tests/test_console.py
+++ b/nova/tests/test_console.py
@@ -57,7 +57,7 @@ class ConsoleTestCase(test.TestCase):
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py
index b6bfab534..588a24b35 100644
--- a/nova/tests/test_direct.py
+++ b/nova/tests/test_direct.py
@@ -25,12 +25,18 @@ import webob
from nova import compute
from nova import context
from nova import exception
+from nova import network
from nova import test
+from nova import volume
from nova import utils
from nova.api import direct
from nova.tests import test_cloud
+class ArbitraryObject(object):
+ pass
+
+
class FakeService(object):
def echo(self, context, data):
return {'data': data}
@@ -39,6 +45,9 @@ class FakeService(object):
return {'user': context.user_id,
'project': context.project_id}
+ def invalid_return(self, context):
+ return ArbitraryObject()
+
class DirectTestCase(test.TestCase):
def setUp(self):
@@ -59,6 +68,7 @@ class DirectTestCase(test.TestCase):
req.headers['X-OpenStack-User'] = 'user1'
req.headers['X-OpenStack-Project'] = 'proj1'
resp = req.get_response(self.auth_router)
+ self.assertEqual(resp.status_int, 200)
data = json.loads(resp.body)
self.assertEqual(data['user'], 'user1')
self.assertEqual(data['project'], 'proj1')
@@ -69,6 +79,7 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'json=%s' % json.dumps({'data': 'foo'})
resp = req.get_response(self.router)
+ self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
@@ -78,9 +89,16 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'data=foo'
resp = req.get_response(self.router)
+ self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
+ def test_invalid(self):
+ req = webob.Request.blank('/fake/invalid_return')
+ req.environ['openstack.context'] = self.context
+ req.method = 'POST'
+ self.assertRaises(exception.Error, req.get_response, self.router)
+
def test_proxy(self):
proxy = direct.Proxy(self.router)
rv = proxy.fake.echo(self.context, data='baz')
@@ -90,13 +108,20 @@ class DirectTestCase(test.TestCase):
class DirectCloudTestCase(test_cloud.CloudTestCase):
def setUp(self):
super(DirectCloudTestCase, self).setUp()
- compute_handle = compute.API(image_service=self.cloud.image_service,
- network_api=self.cloud.network_api,
- volume_api=self.cloud.volume_api)
+ compute_handle = compute.API(image_service=self.cloud.image_service)
+ volume_handle = volume.API()
+ network_handle = network.API()
direct.register_service('compute', compute_handle)
+ direct.register_service('volume', volume_handle)
+ direct.register_service('network', network_handle)
+
self.router = direct.JsonParamsMiddleware(direct.Router())
proxy = direct.Proxy(self.router)
self.cloud.compute_api = proxy.compute
+ self.cloud.volume_api = proxy.volume
+ self.cloud.network_api = proxy.network
+ compute_handle.volume_api = proxy.volume
+ compute_handle.network_api = proxy.network
def tearDown(self):
super(DirectCloudTestCase, self).tearDown()
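DirectCloudTestCase above swaps the cloud controller's compute, volume, and network handles for direct.Proxy objects, so every call is exercised through the Direct API routing layer. A minimal sketch of that register/route/proxy pattern, assuming only the nova.api.direct interfaces used above (register_service, Router, JsonParamsMiddleware, Proxy); EchoService is a made-up example service, not part of nova:

    from nova import context
    from nova.api import direct


    class EchoService(object):
        """Hypothetical service: any object whose methods take a context."""
        def echo(self, context, data):
            return {'data': data}


    # Expose the service under a name, then build a router in front of it.
    direct.register_service('echo', EchoService())
    router = direct.JsonParamsMiddleware(direct.Router())

    # Proxy makes the routed services look like local Python objects again.
    proxy = direct.Proxy(router)
    ctx = context.get_admin_context()
    rv = proxy.echo.echo(ctx, data='hello')
    assert rv['data'] == 'hello'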
diff --git a/nova/tests/test_flat_network.py b/nova/tests/test_flat_network.py
new file mode 100644
index 000000000..dcc617e25
--- /dev/null
+++ b/nova/tests/test_flat_network.py
@@ -0,0 +1,161 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for flat network code
+"""
+import IPy
+import os
+import unittest
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.tests.network import base
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+class FlatNetworkTestCase(base.NetworkTestCase):
+ """Test cases for network code"""
+ def test_public_network_association(self):
+ """Makes sure that we can allocate a public ip"""
+ # TODO(vish): better way of adding floating ips
+
+ self.context._project = self.projects[0]
+ self.context.project_id = self.projects[0].id
+ pubnet = IPy.IP(flags.FLAGS.floating_range)
+ address = str(pubnet[0])
+ try:
+ db.floating_ip_get_by_address(context.get_admin_context(), address)
+ except exception.NotFound:
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'host': FLAGS.host})
+
+ self.assertRaises(NotImplementedError,
+ self.network.allocate_floating_ip,
+ self.context, self.projects[0].id)
+
+ fix_addr = self._create_address(0)
+ float_addr = address
+ self.assertRaises(NotImplementedError,
+ self.network.associate_floating_ip,
+ self.context, float_addr, fix_addr)
+
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, None)
+
+ self.assertRaises(NotImplementedError,
+ self.network.disassociate_floating_ip,
+ self.context, float_addr)
+
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, None)
+
+ self.assertRaises(NotImplementedError,
+ self.network.deallocate_floating_ip,
+ self.context, float_addr)
+
+ self.network.deallocate_fixed_ip(self.context, fix_addr)
+ db.floating_ip_destroy(context.get_admin_context(), float_addr)
+
+ def test_allocate_deallocate_fixed_ip(self):
+ """Makes sure that we can allocate and deallocate a fixed ip"""
+ address = self._create_address(0)
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self._deallocate_address(0, address)
+
+ # check if the fixed ip address is really deallocated
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ def test_side_effects(self):
+ """Ensures allocating and releasing has no side effects"""
+ address = self._create_address(0)
+ address2 = self._create_address(1, self.instance2_id)
+
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ self._deallocate_address(0, address)
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ # First address release shouldn't affect the second
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[0].id))
+
+ self._deallocate_address(1, address2)
+ self.assertFalse(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ def test_ips_are_reused(self):
+ """Makes sure that ip addresses that are deallocated get reused"""
+ address = self._create_address(0)
+ self.network.deallocate_fixed_ip(self.context, address)
+
+ address2 = self._create_address(0)
+ self.assertEqual(address, address2)
+
+ self.network.deallocate_fixed_ip(self.context, address2)
+
+ def test_too_many_addresses(self):
+ """Test for a NoMoreAddresses exception when all fixed ips are used.
+ """
+ admin_context = context.get_admin_context()
+ network = db.project_get_network(admin_context, self.projects[0].id)
+ num_available_ips = db.network_count_available_ips(admin_context,
+ network['id'])
+ addresses = []
+ instance_ids = []
+ for i in range(num_available_ips):
+ instance_ref = self._create_instance(0)
+ instance_ids.append(instance_ref['id'])
+ address = self._create_address(0, instance_ref['id'])
+ addresses.append(address)
+
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, 0)
+ self.assertRaises(db.NoMoreAddresses,
+ self.network.allocate_fixed_ip,
+ self.context,
+ 'foo')
+
+ for i in range(num_available_ips):
+ self.network.deallocate_fixed_ip(self.context, addresses[i])
+ db.instance_destroy(context.get_admin_context(), instance_ids[i])
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, num_available_ips)
+
+ def run(self, result=None):
+ if(FLAGS.network_manager == 'nova.network.manager.FlatManager'):
+ super(FlatNetworkTestCase, self).run(result)
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
new file mode 100644
index 000000000..edc538879
--- /dev/null
+++ b/nova/tests/test_instance_types.py
@@ -0,0 +1,86 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for instance types code
+"""
+import time
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.compute import instance_types
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.compute')
+
+
+class InstanceTypeTestCase(test.TestCase):
+ """Test cases for instance type code"""
+ def setUp(self):
+ super(InstanceTypeTestCase, self).setUp()
+ session = get_session()
+ max_flavorid = session.query(models.InstanceTypes).\
+ order_by("flavorid desc").\
+ first()
+ self.flavorid = max_flavorid["flavorid"] + 1
+ self.name = str(int(time.time()))
+
+ def test_instance_type_create_then_delete(self):
+ """Ensure instance types can be created"""
+ starting_inst_list = instance_types.get_all_types()
+ instance_types.create(self.name, 256, 1, 120, self.flavorid)
+ new = instance_types.get_all_types()
+ self.assertNotEqual(len(starting_inst_list),
+ len(new),
+ 'instance type was not created')
+ instance_types.destroy(self.name)
+ self.assertEqual(1,
+ instance_types.get_instance_type(self.name)["deleted"])
+ self.assertEqual(starting_inst_list, instance_types.get_all_types())
+ instance_types.purge(self.name)
+ self.assertEqual(len(starting_inst_list),
+ len(instance_types.get_all_types()),
+ 'instance type not purged')
+
+ def test_get_all_instance_types(self):
+ """Ensures that all instance types can be retrieved"""
+ session = get_session()
+ total_instance_types = session.query(models.InstanceTypes).\
+ count()
+ inst_types = instance_types.get_all_types()
+ self.assertEqual(total_instance_types, len(inst_types))
+
+ def test_invalid_create_args_should_fail(self):
+ """Ensures that instance type creation fails with invalid args"""
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 0, 1, 120, self.flavorid)
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 256, -1, 120, self.flavorid)
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 256, 1, "aa", self.flavorid)
+
+ def test_non_existent_inst_type_shouldnt_delete(self):
+ """Ensures that destroying a nonexistent instance type fails"""
+ self.assertRaises(exception.ApiError,
+ instance_types.destroy, "sfsfsdfdfs")
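The lifecycle these tests exercise is: create() adds a row, destroy() only soft-deletes it (the 'deleted' flag is set and get_all_types() no longer reports it), and purge() removes the row for good. A minimal sketch, assuming a nova database with the instance_types table present; the name and flavorid below are arbitrary examples and would need to be unique:

    from nova.compute import instance_types

    # create(name, memory_mb, vcpus, local_gb, flavorid)
    instance_types.create('m1.example', 256, 1, 120, 1234)

    # destroy() marks the type as deleted but keeps the row around ...
    instance_types.destroy('m1.example')
    assert instance_types.get_instance_type('m1.example')['deleted'] == 1

    # ... while purge() removes it entirely.
    instance_types.purge('m1.example')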
diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py
index 393d71038..a25809a79 100644
--- a/nova/tests/test_localization.py
+++ b/nova/tests/test_localization.py
@@ -21,9 +21,10 @@ import sys
import unittest
import nova
+from nova import test
-class LocalizationTestCase(unittest.TestCase):
+class LocalizationTestCase(test.TestCase):
def test_multiple_positional_format_placeholders(self):
pat = re.compile("\W_\(")
single_pat = re.compile("\W%\W")
diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py
index 9d49167ba..6564a6955 100644
--- a/nova/tests/test_middleware.py
+++ b/nova/tests/test_middleware.py
@@ -40,12 +40,12 @@ def conditional_forbid(req):
class LockoutTestCase(test.TestCase):
"""Test case for the Lockout middleware."""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self): # pylint: disable=C0103
super(LockoutTestCase, self).setUp()
utils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self): # pylint: disable=C0103
utils.clear_time_override()
super(LockoutTestCase, self).tearDown()
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index e6da6112a..4e17e1ce0 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -14,26 +14,33 @@
# License for the specific language governing permissions and limitations
# under the License.
+import errno
import os
+import select
+
+from eventlet import greenpool
+from eventlet import greenthread
from nova import test
+from nova import utils
from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
- if os.path.exists('.bzr'):
+ topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
+ if os.path.exists(os.path.join(topdir, '.bzr')):
contributors = set()
- mailmap = parse_mailmap('.mailmap')
+ mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
import bzrlib.workingtree
- tree = bzrlib.workingtree.WorkingTree.open('.')
+ tree = bzrlib.workingtree.WorkingTree.open(topdir)
tree.lock_read()
try:
parents = tree.get_parent_ids()
g = tree.branch.repository.get_graph()
- for p in parents[1:]:
+ for p in parents:
rev_ids = [r for r, _ in g.iter_ancestry(parents)
if r != "null:"]
revs = tree.branch.repository.get_revisions(rev_ids)
@@ -42,7 +49,8 @@ class ProjectTestCase(test.TestCase):
email = author.split(' ')[-1]
contributors.add(str_dict_replace(email, mailmap))
- authors_file = open('Authors', 'r').read()
+ authors_file = open(os.path.join(topdir, 'Authors'),
+ 'r').read()
missing = set()
for contributor in contributors:
@@ -55,3 +63,78 @@ class ProjectTestCase(test.TestCase):
'%r not listed in Authors' % missing)
finally:
tree.unlock()
+
+
+class LockTestCase(test.TestCase):
+ def test_synchronized_wrapped_function_metadata(self):
+ @utils.synchronized('whatever')
+ def foo():
+ """Bar"""
+ pass
+ self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring "
+ "got lost")
+ self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
+ "got mangled")
+
+ def test_synchronized_internally(self):
+ """We can lock across multiple green threads"""
+ saved_sem_num = len(utils._semaphores)
+ seen_threads = list()
+
+ @utils.synchronized('testlock2', external=False)
+ def f(id):
+ for x in range(10):
+ seen_threads.append(id)
+ greenthread.sleep(0)
+
+ threads = []
+ pool = greenpool.GreenPool(10)
+ for i in range(10):
+ threads.append(pool.spawn(f, i))
+
+ for thread in threads:
+ thread.wait()
+
+ self.assertEquals(len(seen_threads), 100)
+ # Looking at the seen threads, split it into chunks of 10, and verify
+ # that the last 9 match the first in each chunk.
+ for i in range(10):
+ for j in range(9):
+ self.assertEquals(seen_threads[i * 10],
+ seen_threads[i * 10 + 1 + j])
+
+ self.assertEqual(saved_sem_num, len(utils._semaphores),
+ "Semaphore leak detected")
+
+ def test_synchronized_externally(self):
+ """We can lock across multiple processes"""
+ rpipe1, wpipe1 = os.pipe()
+ rpipe2, wpipe2 = os.pipe()
+
+ @utils.synchronized('testlock1', external=True)
+ def f(rpipe, wpipe):
+ try:
+ os.write(wpipe, "foo")
+ except OSError, e:
+ self.assertEquals(e.errno, errno.EPIPE)
+ return
+
+ rfds, _, __ = select.select([rpipe], [], [], 1)
+ self.assertEquals(len(rfds), 0, "The other process, which was"
+ " supposed to be locked, "
+ "wrote on its end of the "
+ "pipe")
+ os.close(rpipe)
+
+ pid = os.fork()
+ if pid > 0:
+ os.close(wpipe1)
+ os.close(rpipe2)
+
+ f(rpipe1, wpipe2)
+ else:
+ os.close(rpipe1)
+ os.close(wpipe2)
+
+ f(rpipe2, wpipe1)
+ os._exit(0)
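LockTestCase covers both flavors of @utils.synchronized: external=False serializes green threads inside one process, while external=True additionally takes a file-based lock so that separate processes exclude each other (which is what the fork/pipe dance above verifies). A minimal sketch of the in-process case; the counter example is illustrative, not taken from the tests:

    from eventlet import greenpool
    from eventlet import greenthread

    from nova import utils

    counter = {'value': 0}


    @utils.synchronized('example-lock', external=False)
    def bump():
        # Without the lock, the sleep(0) yield would let another green
        # thread interleave between the read and the write below.
        current = counter['value']
        greenthread.sleep(0)
        counter['value'] = current + 1


    pool = greenpool.GreenPool(10)
    for _ in range(10):
        pool.spawn(bump)
    pool.waitall()
    assert counter['value'] == 10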
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index ce1c77210..77f6aaff3 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -21,349 +21,146 @@ Unit Tests for network code
import IPy
import os
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova import log as logging
from nova import test
-from nova import utils
-from nova.auth import manager
+from nova.network import linux_net
+
+
+class IptablesManagerTestCase(test.TestCase):
+ sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*filter',
+ ':INPUT ACCEPT [2223527:305688874]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [2172501:140856656]',
+ ':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-filter-top - [0:0]',
+ '-A FORWARD -j nova-filter-top ',
+ '-A OUTPUT -j nova-filter-top ',
+ '-A nova-filter-top -j nova-compute-local ',
+ '-A INPUT -j nova-compute-INPUT ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A FORWARD -j nova-compute-FORWARD ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-A FORWARD -o virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ '-A FORWARD -i virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [3936:762355]',
+ ':INPUT ACCEPT [2447:225266]',
+ ':OUTPUT ACCEPT [63491:4191863]',
+ ':POSTROUTING ACCEPT [63112:4108641]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]',
+ ':nova-postrouting-bottom - [0:0]',
+ '-A PREROUTING -j nova-compute-PREROUTING ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A POSTROUTING -j nova-compute-POSTROUTING ',
+ '-A POSTROUTING -j nova-postrouting-bottom ',
+ '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
+ '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
-FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.tests.network')
-
-
-class NetworkTestCase(test.TestCase):
- """Test cases for network code"""
def setUp(self):
- super(NetworkTestCase, self).setUp()
- # NOTE(vish): if you change these flags, make sure to change the
- # flags in the corresponding section in nova-dhcpbridge
- self.flags(connection_type='fake',
- fake_call=True,
- fake_network=True)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
- self.projects = []
- self.network = utils.import_object(FLAGS.network_manager)
- self.context = context.RequestContext(project=None, user=self.user)
- for i in range(FLAGS.num_networks):
- name = 'project%s' % i
- project = self.manager.create_project(name, 'netuser', name)
- self.projects.append(project)
- # create the necessary network data for the project
- user_context = context.RequestContext(project=self.projects[i],
- user=self.user)
- host = self.network.get_network_host(user_context.elevated())
- instance_ref = self._create_instance(0)
- self.instance_id = instance_ref['id']
- instance_ref = self._create_instance(1)
- self.instance2_id = instance_ref['id']
-
- def tearDown(self):
- # TODO(termie): this should really be instantiating clean datastores
- # in between runs, one failure kills all the tests
- db.instance_destroy(context.get_admin_context(), self.instance_id)
- db.instance_destroy(context.get_admin_context(), self.instance2_id)
- for project in self.projects:
- self.manager.delete_project(project)
- self.manager.delete_user(self.user)
- super(NetworkTestCase, self).tearDown()
-
- def _create_instance(self, project_num, mac=None):
- if not mac:
- mac = utils.generate_mac()
- project = self.projects[project_num]
- self.context._project = project
- self.context.project_id = project.id
- return db.instance_create(self.context,
- {'project_id': project.id,
- 'mac_address': mac})
-
- def _create_address(self, project_num, instance_id=None):
- """Create an address in given project num"""
- if instance_id is None:
- instance_id = self.instance_id
- self.context._project = self.projects[project_num]
- self.context.project_id = self.projects[project_num].id
- return self.network.allocate_fixed_ip(self.context, instance_id)
-
- def _deallocate_address(self, project_num, address):
- self.context._project = self.projects[project_num]
- self.context.project_id = self.projects[project_num].id
- self.network.deallocate_fixed_ip(self.context, address)
-
- def test_private_ipv6(self):
- """Make sure ipv6 is OK"""
- if FLAGS.use_ipv6:
- instance_ref = self._create_instance(0)
- address = self._create_address(0, instance_ref['id'])
- network_ref = db.project_get_network(
- context.get_admin_context(),
- self.context.project_id)
- address_v6 = db.instance_get_fixed_address_v6(
- context.get_admin_context(),
- instance_ref['id'])
- self.assertEqual(instance_ref['mac_address'],
- utils.to_mac(address_v6))
- instance_ref2 = db.fixed_ip_get_instance_v6(
- context.get_admin_context(),
- address_v6)
- self.assertEqual(instance_ref['id'], instance_ref2['id'])
- self.assertEqual(address_v6,
- utils.to_global_ipv6(
- network_ref['cidr_v6'],
- instance_ref['mac_address']))
- self._deallocate_address(0, address)
- db.instance_destroy(context.get_admin_context(),
- instance_ref['id'])
-
- def test_public_network_association(self):
- """Makes sure that we can allocaate a public ip"""
- # TODO(vish): better way of adding floating ips
- self.context._project = self.projects[0]
- self.context.project_id = self.projects[0].id
- pubnet = IPy.IP(flags.FLAGS.floating_range)
- address = str(pubnet[0])
- try:
- db.floating_ip_get_by_address(context.get_admin_context(), address)
- except exception.NotFound:
- db.floating_ip_create(context.get_admin_context(),
- {'address': address,
- 'host': FLAGS.host})
- float_addr = self.network.allocate_floating_ip(self.context,
- self.projects[0].id)
- fix_addr = self._create_address(0)
- lease_ip(fix_addr)
- self.assertEqual(float_addr, str(pubnet[0]))
- self.network.associate_floating_ip(self.context, float_addr, fix_addr)
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, float_addr)
- self.network.disassociate_floating_ip(self.context, float_addr)
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, None)
- self.network.deallocate_floating_ip(self.context, float_addr)
- self.network.deallocate_fixed_ip(self.context, fix_addr)
- release_ip(fix_addr)
- db.floating_ip_destroy(context.get_admin_context(), float_addr)
-
- def test_allocate_deallocate_fixed_ip(self):
- """Makes sure that we can allocate and deallocate a fixed ip"""
- address = self._create_address(0)
- self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
- lease_ip(address)
- self._deallocate_address(0, address)
-
- # Doesn't go away until it's dhcp released
- self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
-
- release_ip(address)
- self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
-
- def test_side_effects(self):
- """Ensures allocating and releasing has no side effects"""
- address = self._create_address(0)
- address2 = self._create_address(1, self.instance2_id)
-
- self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
- self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
- self.assertFalse(is_allocated_in_project(address, self.projects[1].id))
-
- # Addresses are allocated before they're issued
- lease_ip(address)
- lease_ip(address2)
-
- self._deallocate_address(0, address)
- release_ip(address)
- self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
-
- # First address release shouldn't affect the second
- self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
-
- self._deallocate_address(1, address2)
- release_ip(address2)
- self.assertFalse(is_allocated_in_project(address2,
- self.projects[1].id))
-
- def test_subnet_edge(self):
- """Makes sure that private ips don't overlap"""
- first = self._create_address(0)
- lease_ip(first)
- instance_ids = []
- for i in range(1, FLAGS.num_networks):
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address = self._create_address(i, instance_ref['id'])
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address2 = self._create_address(i, instance_ref['id'])
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address3 = self._create_address(i, instance_ref['id'])
- lease_ip(address)
- lease_ip(address2)
- lease_ip(address3)
- self.context._project = self.projects[i]
- self.context.project_id = self.projects[i].id
- self.assertFalse(is_allocated_in_project(address,
- self.projects[0].id))
- self.assertFalse(is_allocated_in_project(address2,
- self.projects[0].id))
- self.assertFalse(is_allocated_in_project(address3,
- self.projects[0].id))
- self.network.deallocate_fixed_ip(self.context, address)
- self.network.deallocate_fixed_ip(self.context, address2)
- self.network.deallocate_fixed_ip(self.context, address3)
- release_ip(address)
- release_ip(address2)
- release_ip(address3)
- for instance_id in instance_ids:
- db.instance_destroy(context.get_admin_context(), instance_id)
- self.context._project = self.projects[0]
- self.context.project_id = self.projects[0].id
- self.network.deallocate_fixed_ip(self.context, first)
- self._deallocate_address(0, first)
- release_ip(first)
-
- def test_vpn_ip_and_port_looks_valid(self):
- """Ensure the vpn ip and port are reasonable"""
- self.assert_(self.projects[0].vpn_ip)
- self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
- self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
- FLAGS.num_networks)
-
- def test_too_many_networks(self):
- """Ensure error is raised if we run out of networks"""
- projects = []
- networks_left = (FLAGS.num_networks -
- db.network_count(context.get_admin_context()))
- for i in range(networks_left):
- project = self.manager.create_project('many%s' % i, self.user)
- projects.append(project)
- db.project_get_network(context.get_admin_context(), project.id)
- project = self.manager.create_project('last', self.user)
- projects.append(project)
- self.assertRaises(db.NoMoreNetworks,
- db.project_get_network,
- context.get_admin_context(),
- project.id)
- for project in projects:
- self.manager.delete_project(project)
-
- def test_ips_are_reused(self):
- """Makes sure that ip addresses that are deallocated get reused"""
- address = self._create_address(0)
- lease_ip(address)
- self.network.deallocate_fixed_ip(self.context, address)
- release_ip(address)
-
- address2 = self._create_address(0)
- self.assertEqual(address, address2)
- lease_ip(address)
- self.network.deallocate_fixed_ip(self.context, address2)
- release_ip(address)
-
- def test_available_ips(self):
- """Make sure the number of available ips for the network is correct
-
- The number of available IP addresses depends on the test
- environment's setup.
-
- Network size is set in test fixture's setUp method.
-
- There are ips reserved at the bottom and top of the range.
- services (network, gateway, CloudPipe, broadcast)
- """
- network = db.project_get_network(context.get_admin_context(),
- self.projects[0].id)
- net_size = flags.FLAGS.network_size
- admin_context = context.get_admin_context()
- total_ips = (db.network_count_available_ips(admin_context,
- network['id']) +
- db.network_count_reserved_ips(admin_context,
- network['id']) +
- db.network_count_allocated_ips(admin_context,
- network['id']))
- self.assertEqual(total_ips, net_size)
-
- def test_too_many_addresses(self):
- """Test for a NoMoreAddresses exception when all fixed ips are used.
- """
- admin_context = context.get_admin_context()
- network = db.project_get_network(admin_context, self.projects[0].id)
- num_available_ips = db.network_count_available_ips(admin_context,
- network['id'])
- addresses = []
- instance_ids = []
- for i in range(num_available_ips):
- instance_ref = self._create_instance(0)
- instance_ids.append(instance_ref['id'])
- address = self._create_address(0, instance_ref['id'])
- addresses.append(address)
- lease_ip(address)
-
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, 0)
- self.assertRaises(db.NoMoreAddresses,
- self.network.allocate_fixed_ip,
- self.context,
- 'foo')
-
- for i in range(num_available_ips):
- self.network.deallocate_fixed_ip(self.context, addresses[i])
- release_ip(addresses[i])
- db.instance_destroy(context.get_admin_context(), instance_ids[i])
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, num_available_ips)
-
-
-def is_allocated_in_project(address, project_id):
- """Returns true if address is in specified project"""
- project_net = db.project_get_network(context.get_admin_context(),
- project_id)
- network = db.fixed_ip_get_network(context.get_admin_context(), address)
- instance = db.fixed_ip_get_instance(context.get_admin_context(), address)
- # instance exists until release
- return instance is not None and network['id'] == project_net['id']
-
-
-def binpath(script):
- """Returns the absolute path to a script in bin"""
- return os.path.abspath(os.path.join(__file__, "../../../bin", script))
-
-
-def lease_ip(private_ip):
- """Run add command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(),
- private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
- private_ip)
- cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
- instance_ref['mac_address'],
- private_ip)
- env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
- 'TESTING': '1',
- 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
- LOG.debug("ISSUE_IP: %s, %s ", out, err)
-
-
-def release_ip(private_ip):
- """Run del command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(),
- private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
- private_ip)
- cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
- instance_ref['mac_address'],
- private_ip)
- env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
- 'TESTING': '1',
- 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
- LOG.debug("RELEASE_IP: %s, %s ", out, err)
+ super(IptablesManagerTestCase, self).setUp()
+ self.manager = linux_net.IptablesManager()
+
+ def test_filter_rules_are_wrapped(self):
+ current_lines = self.sample_filter
+
+ table = self.manager.ipv4['filter']
+ table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' in new_lines)
+
+ table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' not in new_lines)
+
+ def test_nat_rules(self):
+ current_lines = self.sample_nat
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['nat'])
+
+ for line in [':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains "
+ "went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ last_postrouting_line = ''
+
+ for line in new_lines:
+ if line.startswith('-A POSTROUTING'):
+ last_postrouting_line = line
+
+ self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
+ "Last POSTROUTING rule does not jump to "
+ "nova-postouting-bottom: %s" % last_postrouting_line)
+
+ for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
+
+ def test_filter_rules(self):
+ current_lines = self.sample_filter
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'])
+
+ for line in [':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains"
+ " went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ for chain in ['FORWARD', 'OUTPUT']:
+ for line in new_lines:
+ if line.startswith('-A %s' % chain):
+ self.assertTrue('-j nova-filter-top' in line,
+ "First %s rule does not "
+ "jump to nova-filter-top" % chain)
+ break
+
+ self.assertTrue('-A nova-filter-top '
+ '-j run_tests.py-local' in new_lines,
+ "nova-filter-top does not jump to wrapped local chain")
+
+ for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
new file mode 100644
index 000000000..c78772f27
--- /dev/null
+++ b/nova/tests/test_objectstore.py
@@ -0,0 +1,148 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for the S3 objectstore clone.
+"""
+
+import boto
+import glob
+import hashlib
+import os
+import shutil
+import tempfile
+
+from boto import exception as boto_exception
+from boto.s3 import connection as s3
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova import wsgi
+from nova import test
+from nova.auth import manager
+from nova.objectstore import s3server
+
+
+FLAGS = flags.FLAGS
+
+# Create a unique temporary directory. We don't delete it after the tests
+# so that its contents can be inspected afterwards. Users and/or tools
+# running the tests need to remove these test directories themselves.
+OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
+
+# Create bucket/images path
+os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
+os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
+
+
+class S3APITestCase(test.TestCase):
+ """Test objectstore through S3 API."""
+
+ def setUp(self):
+ """Setup users, projects, and start a test server."""
+ super(S3APITestCase, self).setUp()
+ self.flags(auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
+ buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
+ s3_host='127.0.0.1')
+
+ self.auth_manager = manager.AuthManager()
+ self.admin_user = self.auth_manager.create_user('admin', admin=True)
+ self.admin_project = self.auth_manager.create_project('admin',
+ self.admin_user)
+
+ shutil.rmtree(FLAGS.buckets_path)
+ os.mkdir(FLAGS.buckets_path)
+
+ router = s3server.S3Application(FLAGS.buckets_path)
+ server = wsgi.Server()
+ server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+
+ if not boto.config.has_section('Boto'):
+ boto.config.add_section('Boto')
+ boto.config.set('Boto', 'num_retries', '0')
+ conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
+ aws_secret_access_key=self.admin_user.secret,
+ host=FLAGS.s3_host,
+ port=FLAGS.s3_port,
+ is_secure=False,
+ calling_format=s3.OrdinaryCallingFormat())
+ self.conn = conn
+
+ def get_http_connection(host, is_secure):
+ """Get a new S3 connection, don't attempt to reuse connections."""
+ return self.conn.new_http_connection(host, is_secure)
+
+ self.conn.get_http_connection = get_http_connection
+
+ def _ensure_no_buckets(self, buckets): # pylint: disable=C0111
+ self.assertEquals(len(buckets), 0, "Bucket list was not empty")
+ return True
+
+ def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111
+ self.assertEquals(len(buckets), 1,
+ "Bucket list didn't have exactly one element in it")
+ self.assertEquals(buckets[0].name, name, "Wrong name")
+ return True
+
+ def test_000_list_buckets(self):
+ """Make sure we are starting with no buckets."""
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_001_create_and_delete_bucket(self):
+ """Test bucket creation and deletion."""
+ bucket_name = 'testbucket'
+
+ self.conn.create_bucket(bucket_name)
+ self._ensure_one_bucket(self.conn.get_all_buckets(), bucket_name)
+ self.conn.delete_bucket(bucket_name)
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_002_create_bucket_and_key_and_delete_key_again(self):
+ """Test key operations on buckets."""
+ bucket_name = 'testbucket'
+ key_name = 'somekey'
+ key_contents = 'somekey'
+
+ b = self.conn.create_bucket(bucket_name)
+ k = b.new_key(key_name)
+ k.set_contents_from_string(key_contents)
+
+ bucket = self.conn.get_bucket(bucket_name)
+
+ # make sure the contents are correct
+ key = bucket.get_key(key_name)
+ self.assertEquals(key.get_contents_as_string(), key_contents,
+ "Bad contents")
+
+ # delete the key
+ key.delete()
+
+ self._ensure_no_buckets(bucket.get_all_keys())
+
+ def test_unknown_bucket(self):
+ bucket_name = 'falalala'
+ self.assertRaises(boto_exception.S3ResponseError,
+ self.conn.get_bucket,
+ bucket_name)
+
+ def tearDown(self):
+ """Tear down auth and test server."""
+ self.auth_manager.delete_user('admin')
+ self.auth_manager.delete_project('admin')
+ super(S3APITestCase, self).tearDown()
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 1e42fddf3..c65bc459d 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -20,11 +20,12 @@ from nova import compute
from nova import context
from nova import db
from nova import flags
+from nova import network
from nova import quota
from nova import test
from nova import utils
+from nova import volume
from nova.auth import manager
-from nova.api.ec2 import cloud
from nova.compute import instance_types
@@ -32,6 +33,12 @@ FLAGS = flags.FLAGS
class QuotaTestCase(test.TestCase):
+
+ class StubImageService(object):
+
+ def show(self, *args, **kwargs):
+ return {"properties": {}}
+
def setUp(self):
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
@@ -41,7 +48,6 @@ class QuotaTestCase(test.TestCase):
quota_gigabytes=20,
quota_floating_ips=1)
- self.cloud = cloud.CloudController()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
@@ -57,7 +63,7 @@ class QuotaTestCase(test.TestCase):
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
@@ -74,19 +80,30 @@ class QuotaTestCase(test.TestCase):
vol['size'] = size
return db.volume_create(self.context, vol)['id']
+ def _get_instance_type(self, name):
+ instance_types = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium':
+ dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+ return instance_types[name]
+
def test_quota_overrides(self):
"""Make sure overriding a projects quotas works"""
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 10)
# metadata_items
@@ -107,12 +124,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
- self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
- image_id='fake')
+ image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -120,12 +137,12 @@ class QuotaTestCase(test.TestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
- self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
- image_id='fake')
+ image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -134,9 +151,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
- self.assertRaises(quota.QuotaError, self.cloud.create_volume,
- self.context,
- size=10)
+ self.assertRaises(quota.QuotaError,
+ volume.API().create,
+ self.context,
+ size=10,
+ name='',
+ description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -145,9 +165,11 @@ class QuotaTestCase(test.TestCase):
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError,
- self.cloud.create_volume,
+ volume.API().create,
self.context,
- size=10)
+ size=10,
+ name='',
+ description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -161,7 +183,8 @@ class QuotaTestCase(test.TestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
- self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
+ self.assertRaises(quota.QuotaError,
+ network.API().allocate_floating_ip,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
@@ -176,3 +199,67 @@ class QuotaTestCase(test.TestCase):
instance_type='m1.small',
image_id='fake',
metadata=metadata)
+
+ def test_allowed_injected_files(self):
+ self.assertEqual(
+ quota.allowed_injected_files(self.context),
+ FLAGS.quota_max_injected_files)
+
+ def _create_with_injected_files(self, files):
+ api = compute.API(image_service=self.StubImageService())
+ api.create(self.context, min_count=1, max_count=1,
+ instance_type='m1.small', image_id='fake',
+ injected_files=files)
+
+ def test_no_injected_files(self):
+ api = compute.API(image_service=self.StubImageService())
+ api.create(self.context, instance_type='m1.small', image_id='fake')
+
+ def test_max_injected_files(self):
+ files = []
+ for i in xrange(FLAGS.quota_max_injected_files):
+ files.append(('/my/path%d' % i, 'config = test\n'))
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_files(self):
+ files = []
+ for i in xrange(FLAGS.quota_max_injected_files + 1):
+ files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_allowed_injected_file_content_bytes(self):
+ self.assertEqual(
+ quota.allowed_injected_file_content_bytes(self.context),
+ FLAGS.quota_max_injected_file_content_bytes)
+
+ def test_max_injected_file_content_bytes(self):
+ max = FLAGS.quota_max_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max)])
+ files = [('/test/path', content)]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_content_bytes(self):
+ max = FLAGS.quota_max_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max + 1)])
+ files = [('/test/path', content)]
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_allowed_injected_file_path_bytes(self):
+ self.assertEqual(
+ quota.allowed_injected_file_path_bytes(self.context),
+ FLAGS.quota_max_injected_file_path_bytes)
+
+ def test_max_injected_file_path_bytes(self):
+ max = FLAGS.quota_max_injected_file_path_bytes
+ path = ''.join(['a' for i in xrange(max)])
+ files = [(path, 'config = quotatest')]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_path_bytes(self):
+ max = FLAGS.quota_max_injected_file_path_bytes
+ path = ''.join(['a' for i in xrange(max + 1)])
+ files = [(path, 'config = quotatest')]
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
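The injected-file tests above pin down three limits: the number of files, the size of each file's contents, and the length of each path, all read from the quota_max_injected_* flags via the quota module. A minimal sketch of checking a file list by hand against those limits, mirroring what compute.API().create() is expected to enforce (illustrative only):

    from nova import context
    from nova import quota

    ctx = context.get_admin_context()
    files = [('/etc/motd', 'hello\n')]

    assert len(files) <= quota.allowed_injected_files(ctx)
    for path, contents in files:
        assert len(path) <= quota.allowed_injected_file_path_bytes(ctx)
        assert (len(contents) <=
                quota.allowed_injected_file_content_bytes(ctx))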
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 4820e04fb..44d7c91eb 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -36,7 +36,7 @@ class RpcTestCase(test.TestCase):
super(RpcTestCase, self).setUp()
self.conn = rpc.Connection.instance(True)
self.receiver = TestReceiver()
- self.consumer = rpc.AdapterConsumer(connection=self.conn,
+ self.consumer = rpc.TopicAdapterConsumer(connection=self.conn,
topic='test',
proxy=self.receiver)
self.consumer.attach_to_eventlet()
@@ -97,7 +97,7 @@ class RpcTestCase(test.TestCase):
nested = Nested()
conn = rpc.Connection.instance(True)
- consumer = rpc.AdapterConsumer(connection=conn,
+ consumer = rpc.TopicAdapterConsumer(connection=conn,
topic='nested',
proxy=nested)
consumer.attach_to_eventlet()
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index b6888c4d2..6df74dd61 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -20,23 +20,32 @@ Tests For Scheduler
"""
import datetime
+import mox
+import novaclient.exceptions
+import stubout
+import webob
from mox import IgnoreArg
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import service
from nova import test
from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
+from nova.scheduler import api
from nova.scheduler import manager
from nova.scheduler import driver
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('instances_path', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -54,6 +63,34 @@ class SchedulerTestCase(test.TestCase):
super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
+ def _create_compute_service(self):
+ """Create compute-manager(ComputeNode and Service record)."""
+ ctxt = context.get_admin_context()
+ dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ s_ref = db.service_create(ctxt, dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ db.compute_node_create(ctxt, dic)
+
+ return db.service_get(ctxt, s_ref['id'])
+
+ def _create_instance(self, **kwargs):
+ """Create a test instance"""
+ ctxt = context.get_admin_context()
+ inst = {}
+ inst['user_id'] = 'admin'
+ inst['project_id'] = kwargs.get('project_id', 'fake')
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['vcpus'] = kwargs.get('vcpus', 1)
+ inst['memory_mb'] = kwargs.get('memory_mb', 10)
+ inst['local_gb'] = kwargs.get('local_gb', 20)
+ return db.instance_create(ctxt, inst)
+
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
@@ -76,6 +113,73 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
+ def test_show_host_resources_host_not_exist(self):
+ """The host given as an argument does not exist."""
+
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+
+ try:
+ scheduler.show_host_resources(ctxt, dest)
+ except exception.NotFound, e:
+ c1 = (e.message.find(_("does not exist or is not a "
+ "compute node.")) >= 0)
+ self.assertTrue(c1)
+
+ def _dic_is_equal(self, dic1, dic2, keys=None):
+ """Compares 2 dictionary contents(Helper method)"""
+ if not keys:
+ keys = ['vcpus', 'memory_mb', 'local_gb',
+ 'vcpus_used', 'memory_mb_used', 'local_gb_used']
+
+ for key in keys:
+ if not (dic1[key] == dic2[key]):
+ return False
+ return True
+
+ def test_show_host_resources_no_project(self):
+ """No instance are running on the given host."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ # result checking
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'] == {}
+ self.assertTrue(c1 and c2 and c3)
+ db.service_destroy(ctxt, s_ref['id'])
+
+ def test_show_host_resources_works_correctly(self):
+ """Show_host_resources() works correctly as expected."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+ i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
+ i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
+ host=s_ref['host'])
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'].keys() == ['p-01', 'p-02']
+ keys = ['vcpus', 'memory_mb', 'local_gb']
+ c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys)
+ c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys)
+ self.assertTrue(c1 and c2 and c3 and c4 and c5)
+
+ db.service_destroy(ctxt, s_ref['id'])
+ db.instance_destroy(ctxt, i_ref1['id'])
+ db.instance_destroy(ctxt, i_ref2['id'])
+
class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
@@ -155,26 +259,235 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
+ inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0
- inst['vcpus'] = 1
inst['availability_zone'] = kwargs.get('availability_zone', None)
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['memory_mb'] = kwargs.get('memory_mb', 20)
+ inst['local_gb'] = kwargs.get('local_gb', 30)
+ inst['launched_on'] = kwargs.get('launched_on', 'dummy')
+ inst['state_description'] = kwargs.get('state_description', 'running')
+ inst['state'] = kwargs.get('state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
"""Create a test volume"""
vol = {}
- vol['image_id'] = 'ami-test'
- vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
+ def _create_compute_service(self, **kwargs):
+ """Create a compute service."""
+
+ dic = {'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ dic['host'] = kwargs.get('host', 'dummy')
+ s_ref = db.service_create(self.context, dic)
+ if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
+ t = datetime.datetime.utcnow() - datetime.timedelta(0)
+ dic['created_at'] = kwargs.get('created_at', t)
+ dic['updated_at'] = kwargs.get('updated_at', t)
+ db.service_update(self.context, s_ref['id'], dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
+ dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
+ dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
+ db.compute_node_create(self.context, dic)
+ return db.service_get(self.context, s_ref['id'])
+
+ def test_doesnt_report_disabled_hosts_as_up(self):
+ """Ensures driver doesn't find hosts before they are enabled"""
+ # NOTE(vish): constructing service without create method
+ # because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ db.service_update(self.context, s2['id'], {'disabled': True})
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(0, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_reports_enabled_hosts_as_up(self):
+ """Ensures driver can find the hosts that are up"""
+ # NOTE(vish): constructing service without create method
+ # because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(2, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_instance(self):
+ """Ensures the host with less cores gets the next one"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance()
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual(host, 'host2')
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_specific_host_gets_instance(self):
+ """Ensures if you set availability_zone it launches on that zone"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_wont_schedule_if_specified_host_is_down(self):
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ now = datetime.datetime.utcnow()
+ delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+ past = now - delta
+ db.service_update(self.context, s1['id'], {'updated_at': past})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ self.assertRaises(driver.WillNotSchedule,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id2)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_will_schedule_on_disabled_host_if_specified(self):
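+ """Ensures an explicitly requested host is used even when disabled"""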
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_too_many_cores(self):
+ """Ensures we don't go over max cores"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_ids1 = []
+ instance_ids2 = []
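+ # Fill both hosts up to the max_cores limit so the next instance
+ # cannot be scheduled anywhere.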
+ for index in xrange(FLAGS.max_cores):
+ instance_id = self._create_instance()
+ compute1.run_instance(self.context, instance_id)
+ instance_ids1.append(instance_id)
+ instance_id = self._create_instance()
+ compute2.run_instance(self.context, instance_id)
+ instance_ids2.append(instance_id)
+ instance_id = self._create_instance()
+ self.assertRaises(driver.NoValidHost,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id)
+ for instance_id in instance_ids1:
+ compute1.terminate_instance(self.context, instance_id)
+ for instance_id in instance_ids2:
+ compute2.terminate_instance(self.context, instance_id)
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_volume(self):
+ """Ensures the host with less gigabytes gets the next one"""
+ volume1 = service.Service('host1',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume1.start()
+ volume2 = service.Service('host2',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume2.start()
+ volume_id1 = self._create_volume()
+ volume1.create_volume(self.context, volume_id1)
+ volume_id2 = self._create_volume()
+ host = self.scheduler.driver.schedule_create_volume(self.context,
+ volume_id2)
+ self.assertEqual(host, 'host2')
+ volume1.delete_volume(self.context, volume_id1)
+ db.volume_destroy(self.context, volume_id2)
+ volume1.kill()
+ volume2.kill()
+
@@ -318,3 +631,470 @@ class SimpleDriverTestCase(test.TestCase):
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()
+
+ def test_scheduler_live_migration_with_volume(self):
+ """scheduler_live_migration() works correctly as expected.
+
+ Also, checks instance state is changed from 'running' -> 'migrating'.
+
+ """
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ dic = {'instance_id': instance_id, 'size': 1}
+ v_ref = db.volume_create(self.context, dic)
+
+ # The 2nd argument cannot be checked because the address of the
+ # instance object differs between calls.
+ driver_i = self.scheduler.driver
+ nocare = mox.IgnoreArg()
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_src_check(nocare, nocare)
+ driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
+ driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
+ rpc.cast(self.context,
+ db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
+ {"method": 'live_migration', "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(self.context, FLAGS.compute_topic,
+ instance_id=instance_id,
+ dest=i_ref['host'])
+
+ i_ref = db.instance_get(self.context, instance_id)
+ self.assertTrue(i_ref['state_description'] == 'migrating')
+ db.instance_destroy(self.context, instance_id)
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_instance_not_running(self):
+ """The instance given by instance_id is not running."""
+
+ instance_id = self._create_instance(state_description='migrating')
+ i_ref = db.instance_get(self.context, instance_id)
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not running') > 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+
+ def test_live_migration_src_check_volume_node_not_alive(self):
+ """Raise exception when volume node is not alive."""
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ dic = {'instance_id': instance_id, 'size': 1}
+ v_ref = db.volume_create(self.context, {'instance_id': instance_id,
+ 'size': 1})
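+ # Create a nova-volume service whose heartbeat is a day old so the
+ # volume node is treated as dead.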
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
+ dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
+ 'topic': 'volume', 'report_count': 0}
+ s_ref = db.service_create(self.context, dic)
+
+ try:
+ self.scheduler.driver.schedule_live_migration(self.context,
+ instance_id,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('volume node is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_compute_node_not_alive(self):
+ """Confirms src-compute node is alive."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_src_check_works_correctly(self):
+ """Confirms this method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ ret = self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+
+ self.assertTrue(ret == None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_not_alive(self):
+ """Confirms exception raises in case dest host does not exist."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_same_host(self):
+ """Confirms exceptioin raises in case dest and src is same host."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('choose other host') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_lack_memory(self):
+ """Confirms exception raises when dest doesn't have enough memory."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=12)
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ except exception.NotEmpty, e:
+ c = (e.message.find('Unable to migrate') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_works_correctly(self):
+ """Confirms method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=5)
+
+ ret = self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ self.assertTrue(ret == None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_orig_not_exists(self):
+ """Destination host does not exist."""
+
+ dest = 'dummydest'
+ # mocks for live_migration_common_check()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
+ host=dest)
+
+ # mocks for mounted_on_same_shared_storage()
+ fpath = '/test/20110127120000'
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ topic = FLAGS.compute_topic
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(self.context, topic, dest),
+ {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': fpath}})
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, dest),
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': fpath}})
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find('does not exist') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_different_hypervisor(self):
+ """Original host and dest host has different hypervisor type."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination host
+ s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
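+ # The shared-storage check is stubbed out so only the hypervisor
+ # comparison is exercised.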
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Different hypervisor type')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_service_different_version(self):
+ """Original host and dest host has different hypervisor version."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination host
+ s_ref2 = self._create_compute_service(host=dest,
+ hypervisor_version=12002)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Older hypervisor version')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_checking_cpuinfo_fail(self):
+ """Raise excetion when original host doen't have compatible cpu."""
+
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination host
+ s_ref2 = self._create_compute_service(host=dest)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
+ rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
+ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except rpc.RemoteError, e:
+ c = (e.message.find(_("doesn't have compatibility to")) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+
+class FakeZone(object):
+ def __init__(self, api_url, username, password):
+ self.api_url = api_url
+ self.username = username
+ self.password = password
+
+
+def zone_get_all(context):
+ return [
+ FakeZone('http://example.com', 'bob', 'xxx'),
+ ]
+
+
+class FakeRerouteCompute(api.reroute_compute):
+ def _call_child_zones(self, zones, function):
+ return []
+
+ def get_collection_context_and_id(self, args, kwargs):
+ return ("servers", None, 1)
+
+ def unmarshall_result(self, zone_responses):
+ return dict(magic="found me")
+
+
+def go_boom(self, context, instance):
+ raise exception.InstanceNotFound("boom message", instance)
+
+
+def found_instance(self, context, instance):
+ return dict(name='myserver')
+
+
+class FakeResource(object):
+ def __init__(self, attribute_dict):
+ for k, v in attribute_dict.iteritems():
+ setattr(self, k, v)
+
+ def pause(self):
+ pass
+
+
+class ZoneRedirectTest(test.TestCase):
+ def setUp(self):
+ super(ZoneRedirectTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+
+ self.stubs.Set(db, 'zone_get_all', zone_get_all)
+
+ self.enable_zone_routing = FLAGS.enable_zone_routing
+ FLAGS.enable_zone_routing = True
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.enable_zone_routing = self.enable_zone_routing
+ super(ZoneRedirectTest, self).tearDown()
+
+ def test_trap_found_locally(self):
+ decorator = FakeRerouteCompute("foo")
+ try:
+ result = decorator(found_instance)(None, None, 1)
+ except api.RedirectResult, e:
+ self.fail(_("Successful database hit should succeed"))
+
+ def test_trap_not_found_locally(self):
+ decorator = FakeRerouteCompute("foo")
+ try:
+ result = decorator(go_boom)(None, None, 1)
+ self.assertFail(_("Should have rerouted."))
+ except api.RedirectResult, e:
+ self.assertEquals(e.results['magic'], 'found me')
+
+ def test_routing_flags(self):
+ FLAGS.enable_zone_routing = False
+ decorator = FakeRerouteCompute("foo")
+ try:
+ result = decorator(go_boom)(None, None, 1)
+ self.assertFail(_("Should have thrown exception."))
+ except exception.InstanceNotFound, e:
+ self.assertEquals(e.message, 'boom message')
+
+ def test_get_collection_context_and_id(self):
+ decorator = api.reroute_compute("foo")
+ self.assertEquals(decorator.get_collection_context_and_id(
+ (None, 10, 20), {}), ("servers", 10, 20))
+ self.assertEquals(decorator.get_collection_context_and_id(
+ (None, 11,), dict(instance_id=21)), ("servers", 11, 21))
+ self.assertEquals(decorator.get_collection_context_and_id(
+ (None,), dict(context=12, instance_id=22)), ("servers", 12, 22))
+
+ def test_unmarshal_single_server(self):
+ decorator = api.reroute_compute("foo")
+ self.assertEquals(decorator.unmarshall_result([]), {})
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(a=1, b=2)), ]),
+ dict(server=dict(a=1, b=2)))
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(a=1, _b=2)), ]),
+ dict(server=dict(a=1,)))
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(a=1, manager=2)), ]),
+ dict(server=dict(a=1,)))
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(_a=1, manager=2)), ]),
+ dict(server={}))
+
+
+class FakeServerCollection(object):
+ def get(self, instance_id):
+ return FakeResource(dict(a=10, b=20))
+
+ def find(self, name):
+ return FakeResource(dict(a=11, b=22))
+
+
+class FakeEmptyServerCollection(object):
+ def get(self, f):
+ raise novaclient.NotFound(1)
+
+ def find(self, name):
+ raise novaclient.NotFound(2)
+
+
+class FakeNovaClient(object):
+ def __init__(self, collection):
+ self.servers = collection
+
+
+class DynamicNovaClientTest(test.TestCase):
+ def test_issue_novaclient_command_found(self):
+ zone = FakeZone('http://example.com', 'bob', 'xxx')
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeServerCollection()),
+ zone, "servers", "get", 100).a, 10)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeServerCollection()),
+ zone, "servers", "find", "name").b, 22)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeServerCollection()),
+ zone, "servers", "pause", 100), None)
+
+ def test_issue_novaclient_command_not_found(self):
+ zone = FakeZone('http://example.com', 'bob', 'xxx')
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeEmptyServerCollection()),
+ zone, "servers", "get", 100), None)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeEmptyServerCollection()),
+ zone, "servers", "find", "name"), None)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeEmptyServerCollection()),
+ zone, "servers", "any", "name"), None)
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 45d9afa6c..d48de2057 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
@@ -108,20 +109,29 @@ class ServiceTestCase(test.TestCase):
app = service.Service.create(host=host, binary=binary)
self.mox.StubOutWithMock(rpc,
- 'AdapterConsumer',
+ 'TopicAdapterConsumer',
use_mock_anything=True)
- rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+ self.mox.StubOutWithMock(rpc,
+ 'FanoutAdapterConsumer',
+ use_mock_anything=True)
+ rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
topic=topic,
proxy=mox.IsA(service.Service)).AndReturn(
- rpc.AdapterConsumer)
+ rpc.TopicAdapterConsumer)
- rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+ rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
topic='%s.%s' % (topic, host),
proxy=mox.IsA(service.Service)).AndReturn(
- rpc.AdapterConsumer)
+ rpc.TopicAdapterConsumer)
- rpc.AdapterConsumer.attach_to_eventlet()
- rpc.AdapterConsumer.attach_to_eventlet()
+ rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(),
+ topic=topic,
+ proxy=mox.IsA(service.Service)).AndReturn(
+ rpc.FanoutAdapterConsumer)
+
+ rpc.TopicAdapterConsumer.attach_to_eventlet()
+ rpc.TopicAdapterConsumer.attach_to_eventlet()
+ rpc.FanoutAdapterConsumer.attach_to_eventlet()
service_create = {'host': host,
'binary': binary,
@@ -251,3 +261,44 @@ class ServiceTestCase(test.TestCase):
serv.report_state()
self.assert_(not serv.model_disconnected)
+
+ def test_compute_can_update_available_resource(self):
+ """Confirm compute updates their record of compute-service table."""
+ host = 'foo'
+ binary = 'nova-compute'
+ topic = 'compute'
+
+ # Mocks do not work here without UnsetStubs().
+ self.mox.UnsetStubs()
+ ctxt = context.get_admin_context()
+ service_ref = db.service_create(ctxt, {'host': host,
+ 'binary': binary,
+ 'topic': topic})
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.compute.manager.ComputeManager')
+
+ # This test exercises update_available_resource(); periodic tasks
+ # are not needed, so both intervals are set to 0.
+ serv.report_interval = 0
+ serv.periodic_interval = 0
+
+ # Creating mocks
+ self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
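+ # Three rpc connection instantiations are expected during start().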
+ self.mox.StubOutWithMock(serv.manager.driver,
+ 'update_available_resource')
+ serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)
+
+ # Only start() and stop() are exercised; the new db record is not
+ # verified because update_available_resource() only works in a
+ # libvirt environment. This test confirms update_available_resource()
+ # is called; otherwise mox complains.
+ self.mox.ReplayAll()
+ serv.start()
+ serv.stop()
+
+ db.service_destroy(ctxt, service_ref['id'])
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
index e237674e6..35c838065 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/test_test.py
@@ -34,7 +34,7 @@ class IsolationTestCase(test.TestCase):
def test_rpc_consumer_isolation(self):
connection = rpc.Connection.instance(new=True)
- consumer = rpc.TopicConsumer(connection, topic='compute')
+ consumer = rpc.TopicAdapterConsumer(connection, topic='compute')
consumer.register_callback(
lambda x, y: self.fail('I should never be called'))
consumer.attach_to_eventlet()
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 34a407f1a..e08d229b0 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -14,11 +14,89 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import tempfile
+
from nova import test
from nova import utils
from nova import exception
+class ExecuteTestCase(test.TestCase):
+ def test_retry_on_failure(self):
+ fd, tmpfilename = tempfile.mkstemp()
+ _, tmpfilename2 = tempfile.mkstemp()
+ try:
+ fp = os.fdopen(fd, 'w+')
+ fp.write('''#!/bin/sh
+# If stdin fails to get passed during one of the runs, make a note.
+if ! grep -q foo
+then
+ echo 'failure' > "$1"
+fi
+# If stdin has failed to get passed during this or a previous run, exit early.
+if grep failure "$1"
+then
+ exit 1
+fi
+runs="$(cat $1)"
+if [ -z "$runs" ]
+then
+ runs=0
+fi
+runs=$(($runs + 1))
+echo $runs > "$1"
+exit 1
+''')
+ fp.close()
+ os.chmod(tmpfilename, 0755)
+ self.assertRaises(exception.ProcessExecutionError,
+ utils.execute,
+ tmpfilename, tmpfilename2, attempts=10,
+ process_input='foo',
+ delay_on_retry=False)
+ fp = open(tmpfilename2, 'r+')
+ runs = fp.read()
+ fp.close()
+ self.assertNotEquals(runs.strip(), 'failure', 'stdin did not '
+ 'always get passed '
+ 'correctly')
+ runs = int(runs.strip())
+ self.assertEquals(runs, 10,
+ 'Ran %d times instead of 10.' % (runs,))
+ finally:
+ os.unlink(tmpfilename)
+ os.unlink(tmpfilename2)
+
+ def test_unknown_kwargs_raises_error(self):
+ self.assertRaises(exception.Error,
+ utils.execute,
+ '/bin/true', this_is_not_a_valid_kwarg=True)
+
+ def test_no_retry_on_success(self):
+ fd, tmpfilename = tempfile.mkstemp()
+ _, tmpfilename2 = tempfile.mkstemp()
+ try:
+ fp = os.fdopen(fd, 'w+')
+ fp.write('''#!/bin/sh
+# If we've already run, bail out.
+grep -q foo "$1" && exit 1
+# Mark that we've run before.
+echo foo > "$1"
+# Check that stdin gets passed correctly.
+grep foo
+''')
+ fp.close()
+ os.chmod(tmpfilename, 0755)
+ utils.execute(tmpfilename,
+ tmpfilename2,
+ process_input='foo',
+ attempts=2)
+ finally:
+ os.unlink(tmpfilename)
+ os.unlink(tmpfilename2)
+
+
class GetFromPathTestCase(test.TestCase):
def test_tolerates_nones(self):
f = utils.get_from_path
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index f151ae911..958c8e3e2 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,33 +14,123 @@
# License for the specific language governing permissions and limitations
# under the License.
+import eventlet
+import mox
+import os
+import re
+import sys
+
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
from nova.virt import libvirt_conn
+libvirt = None
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
+def _concurrency(wait, done, target):
+ wait.wait()
+ done.send()
+
+
+class CacheConcurrencyTestCase(test.TestCase):
+ def setUp(self):
+ super(CacheConcurrencyTestCase, self).setUp()
+
+ def fake_exists(fname):
+ basedir = os.path.join(FLAGS.instances_path, '_base')
+ if fname == basedir:
+ return True
+ return False
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(os.path, 'exists', fake_exists)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def test_same_fname_concurrency(self):
+ """Ensures that the same fname cache runs at a sequentially"""
+ conn = libvirt_conn.LibvirtConnection
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname', False, wait1, done1)
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname', False, wait2, done2)
+ wait2.send()
+ eventlet.sleep(0)
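+ # The second request for the same fname should still be blocked
+ # behind the first one.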
+ try:
+ self.assertFalse(done2.ready())
+ finally:
+ wait1.send()
+ done1.wait()
+ eventlet.sleep(0)
+ self.assertTrue(done2.ready())
+
+ def test_different_fname_concurrency(self):
+ """Ensures that two different fname caches are concurrent"""
+ conn = libvirt_conn.LibvirtConnection
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname2', False, wait1, done1)
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname1', False, wait2, done2)
+ wait2.send()
+ eventlet.sleep(0)
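+ # Different fnames use different locks, so the second request can
+ # finish while the first is still waiting.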
+ try:
+ self.assertTrue(done2.ready())
+ finally:
+ wait1.send()
+ eventlet.sleep(0)
+
+
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
+
+ try:
+ pjs = self.manager.get_projects()
+ pjs = [p for p in pjs if p.name == 'fake']
+ if 0 != len(pjs):
+ self.manager.delete_project(pjs[0])
+
+ users = self.manager.get_users()
+ users = [u for u in users if u.name == 'fake']
+ if 0 != len(users):
+ self.manager.delete_user(users[0])
+ except Exception, e:
+ pass
+
+ users = self.manager.get_users()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
+ self.context = context.get_admin_context()
FLAGS.instances_path = ''
+ self.call_libvirt_dependant_setup = False
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
@@ -52,6 +142,58 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
+ def lazy_load_library_exists(self):
+ """check if libvirt is available."""
+ # try to connect libvirt. if fail, skip test.
+ try:
+ import libvirt
+ import libxml2
+ except ImportError:
+ return False
+ global libvirt
+ libvirt = __import__('libvirt')
+ libvirt_conn.libvirt = __import__('libvirt')
+ libvirt_conn.libxml2 = __import__('libxml2')
+ return True
+
+ def create_fake_libvirt_mock(self, **kwargs):
+ """Defining mocks for LibvirtConnection(libvirt is not used)."""
+
+ # A fake libvirt.virConnect
+ class FakeLibvirtConnection(object):
+ pass
+
+ # A fake libvirt_conn.IptablesFirewallDriver
+ class FakeIptablesFirewallDriver(object):
+
+ def __init__(self, **kwargs):
+ pass
+
+ def setattr(self, key, val):
+ self.__setattr__(key, val)
+
+ # Creating mocks
+ fake = FakeLibvirtConnection()
+ fakeip = FakeIptablesFirewallDriver
+ # Customizing above fake if necessary
+ for key, val in kwargs.items():
+ fake.__setattr__(key, val)
+
+ # Required mocks for libvirt_conn.LibvirtConnection
+ self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
+ libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
+ libvirt_conn.LibvirtConnection._conn = fake
+
+ def create_service(self, **kwargs):
+ service_ref = {'host': kwargs.get('host', 'dummy'),
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': 'zone'}
+
+ return db.service_create(context.get_admin_context(), service_ref)
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -83,6 +225,49 @@ class LibvirtConnTestCase(test.TestCase):
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=True)
+ def test_lxc_container_and_uri(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_container(instance_data)
+
+ def _check_xml_and_container(self, instance):
+ user_context = context.RequestContext(project=self.project,
+ user=self.user)
+ instance_ref = db.instance_create(user_context, instance)
+ host = self.network.get_network_host(user_context.elevated())
+ network_ref = db.project_get_network(context.get_admin_context(),
+ self.project.id)
+
+ fixed_ip = {'address': self.test_ip,
+ 'network_id': network_ref['id']}
+
+ ctxt = context.get_admin_context()
+ fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
+ db.fixed_ip_update(ctxt, self.test_ip,
+ {'allocated': True,
+ 'instance_id': instance_ref['id']})
+
+ self.flags(libvirt_type='lxc')
+ conn = libvirt_conn.LibvirtConnection(True)
+
+ uri = conn.get_uri()
+ self.assertEquals(uri, 'lxc:///')
+
+ xml = conn.to_xml(instance_ref)
+ tree = xml_to_tree(xml)
+
+ check = [
+ (lambda t: t.find('.').get('type'), 'lxc'),
+ (lambda t: t.find('./os/type').text, 'exe'),
+ (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
+
+ for i, (check, expected_result) in enumerate(check):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed common check %d' % (xml, i))
+
+ target = tree.find('./devices/filesystem/source').get('dir')
+ self.assertTrue(len(target) > 0)
+
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=False):
user_context = context.RequestContext(project=self.project,
@@ -191,8 +376,8 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically
- # set uri
+ # This test is supposed to make sure we don't
+ # override a specifically set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
@@ -206,6 +391,163 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
+ def test_update_available_resource_works_correctly(self):
+ """Confirm compute_node table is updated successfully."""
+ org_path = FLAGS.instances_path
+ FLAGS.instances_path = '.'
+
+ # Prepare mocks
+ def getVersion():
+ return 12003
+
+ def getType():
+ return 'qemu'
+
+ def listDomainsID():
+ return []
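+ # The fake libvirt connection reports hypervisor 'qemu' version 12003
+ # with no running domains.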
+
+ service_ref = self.create_service(host='dummy')
+ self.create_fake_libvirt_mock(getVersion=getVersion,
+ getType=getType,
+ listDomainsID=listDomainsID)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ 'get_cpu_info')
+ libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.update_available_resource(self.context, 'dummy')
+ service_ref = db.service_get(self.context, service_ref['id'])
+ compute_node = service_ref['compute_node'][0]
+
+ if sys.platform.upper() == 'LINUX2':
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] > 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] > 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+ else:
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] == 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] == 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+
+ db.service_destroy(self.context, service_ref['id'])
+ FLAGS.instances_path = org_path
+
+ def test_update_resource_info_no_compute_record_found(self):
+ """Raise exception if no recorde found on services table."""
+ org_path = FLAGS.instances_path
+ FLAGS.instances_path = '.'
+ self.create_fake_libvirt_mock()
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(exception.Invalid,
+ conn.update_available_resource,
+ self.context, 'dummy')
+
+ FLAGS.instances_path = org_path
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+ """ensure_filtering_fules_for_instance() finishes with timeout."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing mocks
+ def fake_none(self):
+ return
+
+ def fake_raise(self):
+ raise libvirt.libvirtError('ERR')
+
+ class FakeTime(object):
+ def __init__(self):
+ self.counter = 0
+
+ def sleep(self, t):
+ self.counter += t
+
+ fake_timer = FakeTime()
+
+ self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ try:
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
+ conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+ conn.ensure_filtering_rules_for_instance(instance_ref,
+ time=fake_timer)
+ except exception.Error, e:
+ c1 = (0 <= e.message.find('Timeout migrating for'))
+ self.assertTrue(c1)
+
+ self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
+ "amount of time")
+
+ db.instance_destroy(self.context, instance_ref['id'])
+
+ def test_live_migration_raises_exception(self):
+ """Confirms recover method is called when exceptions are raised."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing data
+ self.compute = utils.import_object(FLAGS.compute_manager)
+ instance_dict = {'host': 'fake', 'state': power_state.RUNNING,
+ 'state_description': 'running'}
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ instance_ref = db.instance_update(self.context, instance_ref['id'],
+ instance_dict)
+ vol_dict = {'status': 'migrating', 'size': 1}
+ volume_ref = db.volume_create(self.context, vol_dict)
+ db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
+ '/dev/fake')
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI")
+ vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
+ mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndRaise(libvirt.libvirtError('ERR'))
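+ # migrateToURI fails, so the recover callback must reset the instance
+ # and volume state; the assertions below verify this.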
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref.name:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', '',
+ self.compute.recover_live_migration)
+
+ instance_ref = db.instance_get(self.context, instance_ref['id'])
+ self.assertTrue(instance_ref['state_description'] == 'running')
+ self.assertTrue(instance_ref['state'] == power_state.RUNNING)
+ volume_ref = db.volume_get(self.context, volume_ref['id'])
+ self.assertTrue(volume_ref['status'] == 'in-use')
+
+ db.volume_destroy(self.context, volume_ref['id'])
+ db.instance_destroy(self.context, instance_ref['id'])
+
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
@@ -234,16 +576,22 @@ class IptablesFirewallTestCase(test.TestCase):
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
- in_rules = [
+ in_nat_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ ]
+
+ in_filter_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
- '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
@@ -255,7 +603,7 @@ class IptablesFirewallTestCase(test.TestCase):
'# Completed on Mon Dec 6 11:54:13 2010',
]
- in6_rules = [
+ in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
@@ -315,23 +663,34 @@ class IptablesFirewallTestCase(test.TestCase):
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
# self.fw.add_instance(instance_ref)
- def fake_iptables_execute(cmd, process_input=None):
- if cmd == 'sudo ip6tables-save -t filter':
- return '\n'.join(self.in6_rules), None
- if cmd == 'sudo iptables-save -t filter':
- return '\n'.join(self.in_rules), None
- if cmd == 'sudo iptables-restore':
- self.out_rules = process_input.split('\n')
+ def fake_iptables_execute(*cmd, **kwargs):
+ process_input = kwargs.get('process_input', None)
+ if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
+ return '\n'.join(self.in6_filter_rules), None
+ if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
+ return '\n'.join(self.in_filter_rules), None
+ if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
+ return '\n'.join(self.in_nat_rules), None
+ if cmd == ('sudo', 'iptables-restore'):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
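+ # Only filter-table restores are captured; nat restores are ignored.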
+ self.out_rules = lines
return '', ''
- if cmd == 'sudo ip6tables-restore':
- self.out6_rules = process_input.split('\n')
+ if cmd == ('sudo', 'ip6tables-restore'):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ self.out6_rules = lines
return '', ''
- self.fw.execute = fake_iptables_execute
+ print cmd, kwargs
+
+ from nova.network import linux_net
+ linux_net.iptables_manager.execute = fake_iptables_execute
self.fw.prepare_instance_filter(instance_ref)
self.fw.apply_instance_filter(instance_ref)
- in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
+ in_rules = filter(lambda l: not l.startswith('#'),
+ self.in_filter_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self.out_rules,
@@ -354,17 +713,18 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
- security_group_chain in self.out_rules,
+ regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
- '8 -j ACCEPT' % security_group_chain in self.out_rules,
+ regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
+ '--icmp-type 8 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
- self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
- '--dports 80:81 -j ACCEPT' % security_group_chain \
- in self.out_rules,
+ regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
+ '--dports 80:81 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
@@ -479,7 +839,8 @@ class NWFilterTestCase(test.TestCase):
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
- 'project_id': 'fake'})
+ 'project_id': 'fake',
+ 'mac_address': '00:A0:C9:14:C8:29'})
inst_id = instance_ref['id']
ip = '10.11.12.13'
@@ -496,7 +857,8 @@ class NWFilterTestCase(test.TestCase):
'instance_id': instance_ref['id']})
def _ensure_all_called():
- instance_filter = 'nova-instance-%s' % instance_ref['name']
+ instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
+ '00A0C914C829')
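+ # The per-instance filter name now embeds the MAC address with the
+ # colons stripped.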
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',
diff --git a/nova/tests/test_vlan_network.py b/nova/tests/test_vlan_network.py
new file mode 100644
index 000000000..063b81832
--- /dev/null
+++ b/nova/tests/test_vlan_network.py
@@ -0,0 +1,242 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for vlan network code
+"""
+import IPy
+import os
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.tests.network import base
+from nova.tests.network import binpath,\
+ lease_ip, release_ip
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+class VlanNetworkTestCase(base.NetworkTestCase):
+ """Test cases for network code"""
+ def test_public_network_association(self):
+ """Makes sure that we can allocaate a public ip"""
+ # TODO(vish): better way of adding floating ips
+ self.context._project = self.projects[0]
+ self.context.project_id = self.projects[0].id
+ pubnet = IPy.IP(flags.FLAGS.floating_range)
+ address = str(pubnet[0])
+ try:
+ db.floating_ip_get_by_address(context.get_admin_context(), address)
+ except exception.NotFound:
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'host': FLAGS.host})
+ float_addr = self.network.allocate_floating_ip(self.context,
+ self.projects[0].id)
+ fix_addr = self._create_address(0)
+ lease_ip(fix_addr)
+ self.assertEqual(float_addr, str(pubnet[0]))
+ self.network.associate_floating_ip(self.context, float_addr, fix_addr)
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, float_addr)
+ self.network.disassociate_floating_ip(self.context, float_addr)
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, None)
+ self.network.deallocate_floating_ip(self.context, float_addr)
+ self.network.deallocate_fixed_ip(self.context, fix_addr)
+ release_ip(fix_addr)
+ db.floating_ip_destroy(context.get_admin_context(), float_addr)
+
+ def test_allocate_deallocate_fixed_ip(self):
+ """Makes sure that we can allocate and deallocate a fixed ip"""
+ address = self._create_address(0)
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ lease_ip(address)
+ self._deallocate_address(0, address)
+
+ # Doesn't go away until it's dhcp released
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ release_ip(address)
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ def test_side_effects(self):
+ """Ensures allocating and releasing has no side effects"""
+ address = self._create_address(0)
+ address2 = self._create_address(1, self.instance2_id)
+
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[1].id))
+
+ # Addresses are allocated before they're issued
+ lease_ip(address)
+ lease_ip(address2)
+
+ self._deallocate_address(0, address)
+ release_ip(address)
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ # First address release shouldn't affect the second
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ self._deallocate_address(1, address2)
+ release_ip(address2)
+ self.assertFalse(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ def test_subnet_edge(self):
+ """Makes sure that private ips don't overlap"""
+ first = self._create_address(0)
+ lease_ip(first)
+ instance_ids = []
+ for i in range(1, FLAGS.num_networks):
+ instance_ref = self._create_instance(i, mac=utils.generate_mac())
+ instance_ids.append(instance_ref['id'])
+ address = self._create_address(i, instance_ref['id'])
+ instance_ref = self._create_instance(i, mac=utils.generate_mac())
+ instance_ids.append(instance_ref['id'])
+ address2 = self._create_address(i, instance_ref['id'])
+ instance_ref = self._create_instance(i, mac=utils.generate_mac())
+ instance_ids.append(instance_ref['id'])
+ address3 = self._create_address(i, instance_ref['id'])
+ lease_ip(address)
+ lease_ip(address2)
+ lease_ip(address3)
+ self.context._project = self.projects[i]
+ self.context.project_id = self.projects[i].id
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self.assertFalse(self._is_allocated_in_project(address2,
+ self.projects[0].id))
+ self.assertFalse(self._is_allocated_in_project(address3,
+ self.projects[0].id))
+ self.network.deallocate_fixed_ip(self.context, address)
+ self.network.deallocate_fixed_ip(self.context, address2)
+ self.network.deallocate_fixed_ip(self.context, address3)
+ release_ip(address)
+ release_ip(address2)
+ release_ip(address3)
+ for instance_id in instance_ids:
+ db.instance_destroy(context.get_admin_context(), instance_id)
+ self.context._project = self.projects[0]
+ self.context.project_id = self.projects[0].id
+ self.network.deallocate_fixed_ip(self.context, first)
+ self._deallocate_address(0, first)
+ release_ip(first)
+
+ def test_vpn_ip_and_port_looks_valid(self):
+ """Ensure the vpn ip and port are reasonable"""
+ self.assert_(self.projects[0].vpn_ip)
+ self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
+ self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
+ FLAGS.num_networks)
+
+ def test_too_many_networks(self):
+ """Ensure error is raised if we run out of networks"""
+ projects = []
+ networks_left = (FLAGS.num_networks -
+ db.network_count(context.get_admin_context()))
+ for i in range(networks_left):
+ project = self.manager.create_project('many%s' % i, self.user)
+ projects.append(project)
+ db.project_get_network(context.get_admin_context(), project.id)
+ project = self.manager.create_project('last', self.user)
+ projects.append(project)
+ self.assertRaises(db.NoMoreNetworks,
+ db.project_get_network,
+ context.get_admin_context(),
+ project.id)
+ for project in projects:
+ self.manager.delete_project(project)
+
+ def test_ips_are_reused(self):
+ """Makes sure that ip addresses that are deallocated get reused"""
+ address = self._create_address(0)
+ lease_ip(address)
+ self.network.deallocate_fixed_ip(self.context, address)
+ release_ip(address)
+
+ address2 = self._create_address(0)
+ self.assertEqual(address, address2)
+ lease_ip(address)
+ self.network.deallocate_fixed_ip(self.context, address2)
+ release_ip(address)
+
+ def test_too_many_addresses(self):
+ """Test for a NoMoreAddresses exception when all fixed ips are used.
+ """
+ admin_context = context.get_admin_context()
+ network = db.project_get_network(admin_context, self.projects[0].id)
+ num_available_ips = db.network_count_available_ips(admin_context,
+ network['id'])
+ addresses = []
+ instance_ids = []
+ for i in range(num_available_ips):
+ instance_ref = self._create_instance(0)
+ instance_ids.append(instance_ref['id'])
+ address = self._create_address(0, instance_ref['id'])
+ addresses.append(address)
+ lease_ip(address)
+
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, 0)
+ self.assertRaises(db.NoMoreAddresses,
+ self.network.allocate_fixed_ip,
+ self.context,
+ 'foo')
+
+ for i in range(num_available_ips):
+ self.network.deallocate_fixed_ip(self.context, addresses[i])
+ release_ip(addresses[i])
+ db.instance_destroy(context.get_admin_context(), instance_ids[i])
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, num_available_ips)
+
+ def _is_allocated_in_project(self, address, project_id):
+ """Returns true if address is in specified project"""
+ project_net = db.project_get_network(context.get_admin_context(),
+ project_id)
+ network = db.fixed_ip_get_network(context.get_admin_context(),
+ address)
+ instance = db.fixed_ip_get_instance(context.get_admin_context(),
+ address)
+ # instance exists until release
+ return instance is not None and network['id'] == project_net['id']
+
+ def run(self, result=None):
+ if(FLAGS.network_manager == 'nova.network.manager.VlanManager'):
+ super(VlanNetworkTestCase, self).run(result)
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
new file mode 100644
index 000000000..22b66010a
--- /dev/null
+++ b/nova/tests/test_vmwareapi.py
@@ -0,0 +1,252 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMWareAPI.
+"""
+
+import stubout
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.compute import power_state
+from nova.tests.glance import stubs as glance_stubs
+from nova.tests.vmwareapi import db_fakes
+from nova.tests.vmwareapi import stubs
+from nova.virt import vmwareapi_conn
+from nova.virt.vmwareapi import fake as vmwareapi_fake
+
+
+FLAGS = flags.FLAGS
+
+
+class VMWareAPIVMTestCase(test.TestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ def setUp(self):
+ super(VMWareAPIVMTestCase, self).setUp()
+ self.flags(vmwareapi_host_ip='test_url',
+ vmwareapi_host_username='test_username',
+ vmwareapi_host_password='test_pass')
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.stubs = stubout.StubOutForTesting()
+ vmwareapi_fake.reset()
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ stubs.set_stubs(self.stubs)
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+ self.conn = vmwareapi_conn.get_connection(False)
+
+ def _create_instance_in_the_db(self):
+ values = {'name': 1,
+ 'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
+ self.instance = db.instance_create(values)
+
+ def _create_vm(self):
+ """Create and spawn the VM."""
+ self._create_instance_in_the_db()
+ self.type_data = db.instance_type_get_by_name(None, 'm1.large')
+ self.conn.spawn(self.instance)
+ self._check_vm_record()
+
+ def _check_vm_record(self):
+ """
+ Check if the spawned VM's properties correspond to the instance in
+ the db.
+ """
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ # Get Nova record for VM
+ vm_info = self.conn.get_info(1)
+
+ # Get record for VM
+ vms = vmwareapi_fake._get_objects("VirtualMachine")
+ vm = vms[0]
+
+ # Check that m1.large above turned into the right thing.
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ vcpus = self.type_data['vcpus']
+ self.assertEquals(vm_info['max_mem'], mem_kib)
+ self.assertEquals(vm_info['mem'], mem_kib)
+ self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
+ self.assertEquals(vm.get("summary.config.memorySizeMB"),
+ self.type_data['memory_mb'])
+
+ # Check that the VM is running according to Nova
+ self.assertEquals(vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to vSphere API.
+ self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
+
+ def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
+ """
+ Check if the get_info returned values correspond to the instance
+ object in the db.
+ """
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ self.assertEquals(info["state"], pwr_state)
+ self.assertEquals(info["max_mem"], mem_kib)
+ self.assertEquals(info["mem"], mem_kib)
+ self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ def test_list_instances_1(self):
+ self._create_vm()
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ def test_spawn(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.snapshot(self.instance, "Test-Snapshot")
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.snapshot, self.instance,
+ "Test-Snapshot")
+
+ def test_reboot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.reboot(self.instance)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ def test_reboot_not_poweredon(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ def test_suspend(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+
+ def test_suspend_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.suspend, self.instance,
+ self.dummy_callback_handler)
+
+ def test_resume(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.conn.resume(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_resume_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ def test_resume_not_suspended(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ def test_get_info(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+ self.conn.destroy(self.instance)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ def test_destroy_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertEquals(self.conn.destroy(self.instance), None)
+
+ def test_pause(self):
+ pass
+
+ def test_unpause(self):
+ pass
+
+ def test_diagnostics(self):
+ pass
+
+ def test_get_console_output(self):
+ pass
+
+ def test_get_ajax_console(self):
+ pass
+
+ def dummy_callback_handler(self, ret):
+ """
+ Dummy callback function to be passed to suspend, resume, etc., calls.
+ """
+ pass
+
+ def tearDown(self):
+ super(VMWareAPIVMTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ self.stubs.UnsetAll()
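The VMware test case above never talks to a real hypervisor: setUp installs fakes through stubout and set_stubs, and tearDown restores everything with UnsetAll. A minimal sketch of that swap-and-restore pattern, using only placeholder names (nothing below comes from the Nova tree):

    import types

    hypervisor = types.ModuleType('fake_hypervisor')    # stand-in "module"


    def real_get_connection(read_only):
        raise RuntimeError("would contact a real hypervisor")


    def fake_get_connection(read_only):
        return "fake connection"


    hypervisor.get_connection = real_get_connection


    def run_with_stub():
        # Same shape as stubs.Set(...) in setUp and UnsetAll() in tearDown:
        # install the fake, run the code under test, restore the original.
        original = hypervisor.get_connection
        hypervisor.get_connection = fake_get_connection
        try:
            return hypervisor.get_connection(False)
        finally:
            hypervisor.get_connection = original


    assert run_with_stub() == "fake connection"
    assert hypervisor.get_connection is real_get_connection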
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b40ca004b..d71b75f3f 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -20,6 +20,8 @@ Tests for Volume Code.
"""
+import cStringIO
+
from nova import context
from nova import exception
from nova import db
@@ -99,7 +101,7 @@ class VolumeTestCase(test.TestCase):
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
@@ -173,3 +175,197 @@ class VolumeTestCase(test.TestCase):
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
+
+
+class DriverTestCase(test.TestCase):
+ """Base Test class for Drivers."""
+ driver_name = "nova.volume.driver.FakeAOEDriver"
+
+ def setUp(self):
+ super(DriverTestCase, self).setUp()
+ self.flags(volume_driver=self.driver_name,
+ logging_default_format_string="%(message)s")
+ self.volume = utils.import_object(FLAGS.volume_manager)
+ self.context = context.get_admin_context()
+ self.output = ""
+
+ def _fake_execute(_command, *_args, **_kwargs):
+ """Fake _execute."""
+ return self.output, None
+ self.volume.driver._execute = _fake_execute
+ self.volume.driver._sync_execute = _fake_execute
+
+ log = logging.getLogger()
+ self.stream = cStringIO.StringIO()
+ log.addHandler(logging.StreamHandler(self.stream))
+
+ inst = {}
+ self.instance_id = db.instance_create(self.context, inst)['id']
+
+ def tearDown(self):
+ super(DriverTestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ return []
+
+ def _detach_volume(self, volume_id_list):
+ """Detach volumes from an instance."""
+ for volume_id in volume_id_list:
+ db.volume_detached(self.context, volume_id)
+ self.volume.delete_volume(self.context, volume_id)
+
+
+class AOETestCase(DriverTestCase):
+ """Test Case for AOEDriver"""
+ driver_name = "nova.volume.driver.AOEDriver"
+
+ def setUp(self):
+ super(AOETestCase, self).setUp()
+
+ def tearDown(self):
+ super(AOETestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ volume_id = db.volume_create(self.context,
+ vol)['id']
+ self.volume.create_volume(self.context, volume_id)
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, volume_id, self.instance_id,
+ mountpoint)
+
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id)
+ self.output += "%s %s eth0 /dev/nova-volumes/vol-foo auto run\n" \
+ % (shelf_id, blade_id)
+
+ volume_id_list.append(volume_id)
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_vblade_processes(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_vblade_process_missing(self):
+ """Output a warning message when some vblade processes aren't
+ running."""
+ volume_id_list = self._attach_volume()
+
+ # the first vblade process isn't running
+ self.output = self.output.replace("run", "down", 1)
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id_list[0])
+
+ msg_is_match = False
+ self.stream.truncate(0)
+ try:
+ self.volume.check_for_export(self.context, self.instance_id)
+ except exception.ProcessExecutionError, e:
+ volume_id = volume_id_list[0]
+ msg = _("Cannot confirm exported volume id:%(volume_id)s. "
+ "vblade process for e%(shelf_id)s.%(blade_id)s "
+ "isn't running.") % locals()
+
+ msg_is_match = (0 <= e.message.find(msg))
+
+ self.assertTrue(msg_is_match)
+ self._detach_volume(volume_id_list)
+
+
+class ISCSITestCase(DriverTestCase):
+ """Test Case for ISCSIDriver"""
+ driver_name = "nova.volume.driver.ISCSIDriver"
+
+ def setUp(self):
+ super(ISCSITestCase, self).setUp()
+
+ def tearDown(self):
+ super(ISCSITestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ vol_ref = db.volume_create(self.context, vol)
+ self.volume.create_volume(self.context, vol_ref['id'])
+ vol_ref = db.volume_get(self.context, vol_ref['id'])
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, vol_ref['id'], self.instance_id,
+ mountpoint)
+ volume_id_list.append(vol_ref['id'])
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_volume_exported(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ for i in volume_id_list:
+ tid = db.volume_get_iscsi_target_num(self.context, i)
+ self.volume.driver._execute("sudo", "ietadm", "--op", "show",
+ "--tid=%(tid)d" % locals())
+
+ self.stream.truncate(0)
+ self.mox.ReplayAll()
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_some_volume_missing(self):
+ """Output a warning message when some volumes are not recognied
+ by ietd."""
+ volume_id_list = self._attach_volume()
+
+ # the first vblade process isn't running
+ tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ self.volume.driver._execute("sudo", "ietadm", "--op", "show",
+ "--tid=%(tid)d" % locals()).AndRaise(
+ exception.ProcessExecutionError())
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.volume.check_for_export,
+ self.context,
+ self.instance_id)
+ msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
+ self.assertTrue(0 <= self.stream.getvalue().find(msg))
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
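DriverTestCase above relies on capturing log output in memory: a StreamHandler backed by a cStringIO buffer is attached to the logger so the tests can assert on exactly what check_for_export logged. A self-contained sketch of that technique using only the standard library; check_for_export here is a placeholder, not the Nova volume manager method:

    import cStringIO
    import logging


    def check_for_export(log, exported):
        # placeholder for the manager's check_for_export
        if not exported:
            log.warn("vblade process for e1.2 isn't running")


    stream = cStringIO.StringIO()
    log = logging.getLogger("volume-sketch")
    log.addHandler(logging.StreamHandler(stream))

    check_for_export(log, exported=True)
    assert stream.getvalue() == ''            # nothing logged when all is well

    check_for_export(log, exported=False)
    assert "isn't running" in stream.getvalue()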
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 6b8efc9d8..17e3f55e9 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -14,15 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Test suite for XenAPI
-"""
+"""Test suite for XenAPI."""
+import functools
+import os
+import re
import stubout
+import ast
from nova import db
from nova import context
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
@@ -31,29 +34,47 @@ from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
+from nova.tests import fake_utils
+
+LOG = logging.getLogger('nova.tests.test_xenapi')
FLAGS = flags.FLAGS
-class XenAPIVolumeTestCase(test.TestCase):
+def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
- Unit tests for Volume operations
+ vm_utils.with_vdi_attached_here needs to be stubbed out because it
+ calls down to the filesystem to attach a vdi. This provides a
+ decorator to handle that.
"""
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
+ vm_utils.with_vdi_attached_here = lambda *x: should_return
+ function(self, *args, **kwargs)
+ vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
+ return decorated_function
+
+
+class XenAPIVolumeTestCase(test.TestCase):
+ """Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
+ self.context = context.RequestContext('fake', 'fake', False)
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
- self.values = {'name': 1, 'id': 1,
+ self.values = {'id': 1,
'project_id': 'fake',
'user_id': 'fake',
'image_id': 1,
@@ -61,7 +82,7 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
+ 'os_type': 'linux'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -73,10 +94,10 @@ class XenAPIVolumeTestCase(test.TestCase):
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
- return db.volume_create(context.get_admin_context(), vol)
+ return db.volume_create(self.context, vol)
def test_create_iscsi_storage(self):
- """ This shows how to test helper classes' methods """
+ """This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
@@ -91,7 +112,7 @@ class XenAPIVolumeTestCase(test.TestCase):
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_parse_volume_info_raise_exception(self):
- """ This shows how to test helper classes' methods """
+ """This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
@@ -105,11 +126,11 @@ class XenAPIVolumeTestCase(test.TestCase):
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
- """ This shows how to test Ops classes' methods """
+ """This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
- instance = db.instance_create(self.values)
+ instance = db.instance_create(self.context, self.values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
@@ -124,12 +145,12 @@ class XenAPIVolumeTestCase(test.TestCase):
check()
def test_attach_volume_raise_exception(self):
- """ This shows how to test when exceptions are raised """
+ """This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
- instance = db.instance_create(self.values)
+ instance = db.instance_create(self.context, self.values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
@@ -147,9 +168,7 @@ def reset_network(*args):
class XenAPIVMTestCase(test.TestCase):
- """
- Unit tests for VM operations
- """
+ """Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.manager = manager.AuthManager()
@@ -158,18 +177,24 @@ class XenAPIVMTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
- FLAGS.xenapi_connection_url = 'test_url'
- FLAGS.xenapi_connection_password = 'test_pass'
+ self.flags(xenapi_connection_url='test_url',
+ xenapi_connection_password='test_pass',
+ instance_name_template='%d')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
+ xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
+ stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network)
+ stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
+ fake_utils.stub_out_utils_execute(self.stubs)
+ self.context = context.RequestContext('fake', 'fake', False)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
@@ -194,7 +219,7 @@ class XenAPIVMTestCase(test.TestCase):
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
- self.assertEquals(vm_labels, [1])
+ self.assertEquals(vm_labels, ['1'])
def ensure_vbd_was_torn_down():
vbd_labels = []
@@ -202,7 +227,7 @@ class XenAPIVMTestCase(test.TestCase):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
- self.assertEquals(vbd_labels, [1])
+ self.assertEquals(vbd_labels, ['1'])
def ensure_vdi_was_torn_down():
for vdi_ref in xenapi_fake.get_all('VDI'):
@@ -217,43 +242,96 @@ class XenAPIVMTestCase(test.TestCase):
check()
- def check_vm_record(self, conn):
+ def create_vm_record(self, conn, os_type, instance_id=1):
instances = conn.list_instances()
- self.assertEquals(instances, [1])
+ self.assertEquals(instances, [str(instance_id)])
# Get Nova record for VM
- vm_info = conn.get_info(1)
-
+ vm_info = conn.get_info(instance_id)
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
+ self.vm_info = vm_info
+ self.vm = vm
+ def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
- instance_type = instance_types.INSTANCE_TYPES['m1.large']
+ instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm['memory_static_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
- self.assertEquals(vm['VCPUs_max'], str(vcpus))
- self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+ self.assertEquals(self.vm_info['max_mem'], mem_kib)
+ self.assertEquals(self.vm_info['mem'], mem_kib)
+ self.assertEquals(self.vm['memory_static_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
+ self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
+ self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
+ self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
- self.assertEquals(vm['power_state'], 'Running')
+ self.assertEquals(self.vm['power_state'], 'Running')
+
+ if check_injection:
+ xenstore_data = self.vm['xenstore_data']
+ key = 'vm-data/networking/aabbccddeeff'
+ xenstore_value = xenstore_data[key]
+ tcpip_data = ast.literal_eval(xenstore_value)
+ self.assertEquals(tcpip_data,
+ {'label': 'fake_flat_network',
+ 'broadcast': '10.0.0.255',
+ 'ips': [{'ip': '10.0.0.3',
+ 'netmask':'255.255.255.0',
+ 'enabled':'1'}],
+ 'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
+ 'netmask': '120',
+ 'enabled': '1',
+ 'gateway': 'fe80::a00:1'}],
+ 'mac': 'aa:bb:cc:dd:ee:ff',
+ 'dns': ['10.0.0.2'],
+ 'gateway': '10.0.0.1'})
+
+ def check_vm_params_for_windows(self):
+ self.assertEquals(self.vm['platform']['nx'], 'true')
+ self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
+ self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_args'], '')
+ self.assertEquals(self.vm['PV_bootloader'], '')
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+
+ def check_vm_params_for_linux(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
+ self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
+
+ def check_vm_params_for_linux_with_external_kernel(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
+ self.assertNotEquals(self.vm['PV_kernel'], '')
+ self.assertNotEquals(self.vm['PV_ramdisk'], '')
+
+ # check that these are not set
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
- instance_type="m1.large"):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- values = {'name': 1,
- 'id': 1,
+ instance_type="m1.large", os_type="linux",
+ instance_id=1, check_injection=False):
+ stubs.stubout_loopingcall_start(self.stubs)
+ values = {'id': instance_id,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': image_id,
@@ -261,11 +339,11 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
- conn = xenapi_conn.get_connection(False)
- instance = db.instance_create(values)
- conn.spawn(instance)
- self.check_vm_record(conn)
+ 'os_type': os_type}
+ instance = db.instance_create(self.context, values)
+ self.conn.spawn(instance)
+ self.create_vm_record(self.conn, os_type, instance_id)
+ self.check_vm_record(self.conn, check_injection)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
@@ -281,24 +359,164 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, 2, 3)
+ @stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, None, None)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_linux(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="linux")
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_windows(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="windows")
+ self.check_vm_params_for_windows()
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, 2, 3)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK)
+ self.check_vm_params_for_linux_with_external_kernel()
+
+ def test_spawn_netinject_file(self):
+ FLAGS.xenapi_image_service = 'glance'
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _tee_handler(cmd, **kwargs):
+ input = kwargs.get('process_input', None)
+ self.assertNotEqual(input, None)
+ config = [line.strip() for line in input.split("\n")]
+ # Find the start of eth0 configuration and check it
+ index = config.index('auto eth0')
+ self.assertEquals(config[index + 1:index + 8], [
+ 'iface eth0 inet static',
+ 'address 10.0.0.3',
+ 'netmask 255.255.255.0',
+ 'broadcast 10.0.0.255',
+ 'gateway 10.0.0.1',
+ 'dns-nameservers 10.0.0.2',
+ ''])
+ self._tee_executed = True
+ return '', ''
+
+ fake_utils.fake_execute_set_repliers([
+ # Capture the sudo tee .../etc/network/interfaces command
+ (r'(sudo\s+)?tee.*interfaces', _tee_handler),
+ ])
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK,
+ check_injection=True)
+ self.assertTrue(self._tee_executed)
+
+ def test_spawn_netinject_xenstore(self):
+ FLAGS.xenapi_image_service = 'glance'
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
+ # When mounting, create real files under the mountpoint to simulate
+ # files in the mounted filesystem
+
+ # mount point will be the last item of the command list
+ self._tmpdir = cmd[len(cmd) - 1]
+ LOG.debug(_('Creating files in %s to simulate guest agent' %
+ self._tmpdir))
+ os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ # Touch the file using open
+ open(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'), 'w').close()
+ return '', ''
+
+ def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
+ # Umount would normally make files in the mounted filesystem
+ # disappear, so do that here
+ LOG.debug(_('Removing simulated guest agent files in %s' %
+ self._tmpdir))
+ os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr'))
+ return '', ''
+
+ def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
+ self._tee_executed = True
+ return '', ''
+
+ fake_utils.fake_execute_set_repliers([
+ (r'(sudo\s+)?mount', _mount_handler),
+ (r'(sudo\s+)?umount', _umount_handler),
+ (r'(sudo\s+)?tee.*interfaces', _tee_handler)])
+ self._test_spawn(1, 2, 3, check_injection=True)
+
+ # tee must not run in this case, where an injection-capable
+ # guest agent is detected
+ self.assertFalse(self._tee_executed)
+
+ def test_spawn_vlanmanager(self):
+ self.flags(xenapi_image_service='glance',
+ network_manager='nova.network.manager.VlanManager',
+ network_driver='nova.network.xenapi_net',
+ vlan_interface='fake0')
+ # Reset network table
+ xenapi_fake.reset_table('network')
+ # Instance id = 2 will use vlan network (see db/fakes.py)
+ fake_instance_id = 2
+ network_bk = self.network
+ # Ensure we use xenapi_net driver
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.network.setup_compute_network(None, fake_instance_id)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK,
+ instance_id=fake_instance_id)
+ # TODO(salvatore-orlando): a complete test here would require
+ # a check for making sure the bridge for the VM's VIF is
+ # consistent with bridge specified in nova db
+ self.network = network_bk
+
+ def test_spawn_with_network_qos(self):
+ self._create_instance()
+ for vif_ref in xenapi_fake.get_all('VIF'):
+ vif_rec = xenapi_fake.get_record('VIF', vif_ref)
+ self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
+ self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
+ str(4 * 1024))
+
+ def test_rescue(self):
+ self.flags(xenapi_inject_image=False)
+ instance = self._create_instance()
+ conn = xenapi_conn.get_connection(False)
+ conn.rescue(instance, None)
+
+ def test_unrescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.get_connection(False)
+ # Ensure that it will not unrescue a non-rescued instance.
+ self.assertRaises(Exception, conn.unrescue, instance, None)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ self.vm_info = None
+ self.vm = None
self.stubs.UnsetAll()
def _create_instance(self):
- """Creates and spawns a test instance"""
+ """Creates and spawns a test instance."""
+ stubs.stubout_loopingcall_start(self.stubs)
values = {
- 'name': 1,
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
@@ -306,16 +524,15 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff'}
- instance = db.instance_create(values)
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
+ instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
- """
- Unit tests for Diffie-Hellman code
- """
+ """Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = SimpleDH()
@@ -336,3 +553,115 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
+
+
+class XenAPIMigrateInstance(test.TestCase):
+ """Unit test for verifying migration-related actions."""
+
+ def setUp(self):
+ super(XenAPIMigrateInstance, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ FLAGS.target_host = '127.0.0.1'
+ FLAGS.xenapi_connection_url = 'test_url'
+ FLAGS.xenapi_connection_password = 'test_pass'
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ stubs.stub_out_get_target(self.stubs)
+ xenapi_fake.reset()
+ xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.RequestContext('fake', 'fake', False)
+ self.values = {'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': 1,
+ 'kernel_id': None,
+ 'ramdisk_id': None,
+ 'local_gb': 5,
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
+
+ fake_utils.stub_out_utils_execute(self.stubs)
+ stubs.stub_out_migration_methods(self.stubs)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+
+ def tearDown(self):
+ super(XenAPIMigrateInstance, self).tearDown()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ self.stubs.UnsetAll()
+
+ def test_migrate_disk_and_power_off(self):
+ instance = db.instance_create(self.context, self.values)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ conn = xenapi_conn.get_connection(False)
+ conn.migrate_disk_and_power_off(instance, '127.0.0.1')
+
+ def test_finish_resize(self):
+ instance = db.instance_create(self.context, self.values)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ stubs.stubout_loopingcall_start(self.stubs)
+ conn = xenapi_conn.get_connection(False)
+ conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'))
+
+
+class XenAPIDetermineDiskImageTestCase(test.TestCase):
+ """Unit tests for code that detects the ImageType."""
+ def setUp(self):
+ super(XenAPIDetermineDiskImageTestCase, self).setUp()
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+
+ class FakeInstance(object):
+ pass
+
+ self.fake_instance = FakeInstance()
+ self.fake_instance.id = 42
+ self.fake_instance.os_type = 'linux'
+
+ def assert_disk_type(self, disk_type):
+ dt = vm_utils.VMHelper.determine_disk_image_type(
+ self.fake_instance)
+ self.assertEqual(disk_type, dt)
+
+ def test_instance_disk(self):
+ """If a kernel is specified, the image type is DISK (aka machine)."""
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+ self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
+ self.assert_disk_type(vm_utils.ImageType.DISK)
+
+ def test_instance_disk_raw(self):
+ """
+ If the kernel isn't specified, and we're not using Glance, then
+ DISK_RAW is assumed.
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_raw(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'raw'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_vhd(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'vhd'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
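stub_vm_utils_with_vdi_attached_here at the top of this file is one instance of a small reusable pattern: temporarily replace a module attribute for the duration of a single test method. A generic sketch of that pattern follows (the version above restores the attribute without try/finally; the finally block here is an addition so the original is restored even if the test raises):

    import functools


    def with_stubbed_attribute(module, name, replacement):
        """Swap module.<name> for `replacement` around each call."""
        def wrap(function):
            @functools.wraps(function)
            def inner(*args, **kwargs):
                original = getattr(module, name)
                setattr(module, name, replacement)
                try:
                    return function(*args, **kwargs)
                finally:
                    setattr(module, name, original)
            return inner
        return wrap


    # usage sketch (hypothetical test method):
    #     @with_stubbed_attribute(vm_utils, 'with_vdi_attached_here',
    #                             lambda *x: True)
    #     def test_something(self):
    #         ...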
diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py
new file mode 100644
index 000000000..688dc704d
--- /dev/null
+++ b/nova/tests/test_zones.py
@@ -0,0 +1,206 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For ZoneManager
+"""
+
+import datetime
+import mox
+import novaclient
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import service
+from nova import test
+from nova import rpc
+from nova import utils
+from nova.auth import manager as auth_manager
+from nova.scheduler import zone_manager
+
+FLAGS = flags.FLAGS
+
+
+class FakeZone:
+ """Represents a fake zone from the db"""
+ def __init__(self, *args, **kwargs):
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+
+
+def exploding_novaclient(zone):
+ """Used when we want to simulate a novaclient call failing."""
+ raise Exception("kaboom")
+
+
+class ZoneManagerTestCase(test.TestCase):
+ """Test case for zone manager"""
+ def test_ping(self):
+ zm = zone_manager.ZoneManager()
+ self.mox.StubOutWithMock(zm, '_refresh_from_db')
+ self.mox.StubOutWithMock(zm, '_poll_zones')
+ zm._refresh_from_db(mox.IgnoreArg())
+ zm._poll_zones(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ zm.ping(None)
+ self.mox.VerifyAll()
+
+ def test_refresh_from_db_new(self):
+ zm = zone_manager.ZoneManager()
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([
+ FakeZone(id=1, api_url='http://foo.com', username='user1',
+ password='pass1'),
+ ])
+
+ self.assertEquals(len(zm.zone_states), 0)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertEquals(zm.zone_states[1].username, 'user1')
+
+ def test_service_capabilities(self):
+ zm = zone_manager.ZoneManager()
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, {})
+
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
+
+ zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
+
+ zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
+
+ zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
+ svc10_a=(99, 99), svc10_b=(99, 99)))
+
+ zm.update_service_capabilities("svc1", "host3", dict(c=5))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
+ svc1_c=(5, 5), svc10_a=(99, 99),
+ svc10_b=(99, 99)))
+
+ caps = zm.get_zone_capabilities(self, 'svc1')
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
+ svc1_c=(5, 5)))
+ caps = zm.get_zone_capabilities(self, 'svc10')
+ self.assertEquals(caps, dict(svc10_a=(99, 99), svc10_b=(99, 99)))
+
+ def test_refresh_from_db_replace_existing(self):
+ zm = zone_manager.ZoneManager()
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+ username='user1', password='pass1'))
+ zm.zone_states[1] = zone_state
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([
+ FakeZone(id=1, api_url='http://foo.com', username='user2',
+ password='pass2'),
+ ])
+
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertEquals(zm.zone_states[1].username, 'user2')
+
+ def test_refresh_from_db_missing(self):
+ zm = zone_manager.ZoneManager()
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+ username='user1', password='pass1'))
+ zm.zone_states[1] = zone_state
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([])
+
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 0)
+
+ def test_refresh_from_db_add_and_delete(self):
+ zm = zone_manager.ZoneManager()
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+ username='user1', password='pass1'))
+ zm.zone_states[1] = zone_state
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([
+ FakeZone(id=2, api_url='http://foo.com', username='user2',
+ password='pass2'),
+ ])
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertEquals(zm.zone_states[2].username, 'user2')
+
+ def test_poll_zone(self):
+ self.mox.StubOutWithMock(zone_manager, '_call_novaclient')
+ zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn(
+ dict(name='zohan', capabilities='hairdresser'))
+
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=2,
+ api_url='http://foo.com', username='user2',
+ password='pass2'))
+ zone_state.attempt = 1
+
+ self.mox.ReplayAll()
+ zone_manager._poll_zone(zone_state)
+ self.mox.VerifyAll()
+ self.assertEquals(zone_state.attempt, 0)
+ self.assertEquals(zone_state.name, 'zohan')
+
+ def test_poll_zone_fails(self):
+ self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient)
+
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=2,
+ api_url='http://foo.com', username='user2',
+ password='pass2'))
+ zone_state.attempt = FLAGS.zone_failures_to_offline - 1
+
+ self.mox.ReplayAll()
+ zone_manager._poll_zone(zone_state)
+ self.mox.VerifyAll()
+ self.assertEquals(zone_state.attempt, 3)
+ self.assertFalse(zone_state.is_active)
+ self.assertEquals(zone_state.name, None)
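The assertions in test_service_capabilities expect get_zone_capabilities to report, for each service_capability pair, the minimum and maximum value seen across all hosts. The sketch below reproduces that expected behaviour; it is an illustration of what the tests assert, not the ZoneManager implementation:

    def aggregate_capabilities(service_states):
        """service_states: {host: {service: {capability: value}}}."""
        caps = {}
        for host, services in service_states.items():
            for service, capabilities in services.items():
                for cap, value in capabilities.items():
                    key = "%s_%s" % (service, cap)
                    lo, hi = caps.get(key, (value, value))
                    caps[key] = (min(lo, value), max(hi, value))
        return caps


    states = {'host1': {'svc1': dict(a=2, b=3)},
              'host2': {'svc1': dict(a=20, b=30)}}
    assert aggregate_capabilities(states) == {'svc1_a': (2, 20),
                                              'svc1_b': (3, 30)}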
diff --git a/nova/tests/vmwareapi/__init__.py b/nova/tests/vmwareapi/__init__.py
new file mode 100644
index 000000000..478ee742b
--- /dev/null
+++ b/nova/tests/vmwareapi/__init__.py
@@ -0,0 +1,21 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`vmwareapi` -- Stubs for VMware API
+=======================================
+"""
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
new file mode 100644
index 000000000..0addd5573
--- /dev/null
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts, mocks and fixtures for the test suite
+"""
+
+import time
+
+from nova import db
+from nova import utils
+
+
+def stub_out_db_instance_api(stubs):
+ """Stubs out the db API for creating Instances."""
+
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium':
+ dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
+ class FakeModel(object):
+ """Stubs out for model."""
+
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def fake_instance_create(values):
+ """Stubs out the db.instance_create method."""
+
+ type_data = INSTANCE_TYPES[values['instance_type']]
+
+ base_options = {
+ 'name': values['name'],
+ 'id': values['id'],
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_id': values['image_id'],
+ 'kernel_id': values['kernel_id'],
+ 'ramdisk_id': values['ramdisk_id'],
+ 'state_description': 'scheduling',
+ 'user_id': values['user_id'],
+ 'project_id': values['project_id'],
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': values['instance_type'],
+ 'memory_mb': type_data['memory_mb'],
+ 'mac_address': values['mac_address'],
+ 'vcpus': type_data['vcpus'],
+ 'local_gb': type_data['local_gb'],
+ }
+ return FakeModel(base_options)
+
+ def fake_network_get_by_instance(context, instance_id):
+ """Stubs out the db.network_get_by_instance method."""
+
+ fields = {
+ 'bridge': 'vmnet0',
+ 'netmask': '255.255.255.0',
+ 'gateway': '10.10.10.1',
+ 'vlan': 100}
+ return FakeModel(fields)
+
+ def fake_instance_action_create(context, action):
+ """Stubs out the db.instance_action_create method."""
+ pass
+
+ def fake_instance_get_fixed_address(context, instance_id):
+ """Stubs out the db.instance_get_fixed_address method."""
+ return '10.10.10.10'
+
+ def fake_instance_type_get_all(context, inactive=0):
+ return INSTANCE_TYPES
+
+ def fake_instance_type_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
+
+ stubs.Set(db, 'instance_create', fake_instance_create)
+ stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
+ stubs.Set(db, 'instance_action_create', fake_instance_action_create)
+ stubs.Set(db, 'instance_get_fixed_address',
+ fake_instance_get_fixed_address)
+ stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
+ stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
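FakeModel above imitates a database row just enough for the driver code under test: values can be read both as attributes and with item access. A short usage sketch, with values chosen to match the m1.large entry in INSTANCE_TYPES:

    class FakeModel(object):
        """Same shape as the FakeModel defined above."""
        def __init__(self, values):
            self.values = values

        def __getattr__(self, name):
            return self.values[name]

        def __getitem__(self, key):
            return self.values[key]


    row = FakeModel({'memory_mb': 8192, 'vcpus': 4, 'local_gb': 80})
    assert row.memory_mb == 8192     # attribute access, like model.memory_mb
    assert row['vcpus'] == 4         # item access, like model['vcpus']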
diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py
new file mode 100644
index 000000000..a648efb16
--- /dev/null
+++ b/nova/tests/vmwareapi/stubs.py
@@ -0,0 +1,46 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts for the test suite
+"""
+
+from nova.virt import vmwareapi_conn
+from nova.virt.vmwareapi import fake
+from nova.virt.vmwareapi import vmware_images
+
+
+def fake_get_vim_object(arg):
+ """Stubs out the VMWareAPISession's get_vim_object method."""
+ return fake.FakeVim()
+
+
+def fake_is_vim_object(arg, module):
+ """Stubs out the VMWareAPISession's is_vim_object method."""
+ return isinstance(module, fake.FakeVim)
+
+
+def set_stubs(stubs):
+ """Set the stubs."""
+ stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
+ stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
+ fake.fake_get_vmdk_size_and_properties)
+ stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
+ stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
+ fake_get_vim_object)
+ stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object",
+ fake_is_vim_object)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 624995ada..205f6c902 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -20,6 +20,8 @@ from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova import utils
def stubout_instance_snapshot(stubs):
@@ -27,7 +29,7 @@ def stubout_instance_snapshot(stubs):
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
# Stubout wait_for_task
- def fake_wait_for_task(self, id, task):
+ def fake_wait_for_task(self, task, id):
class FakeEvent:
def send(self, value):
@@ -130,14 +132,23 @@ def stubout_stream_disk(stubs):
stubs.Set(vm_utils, '_stream_disk', f)
+def stubout_is_vdi_pv(stubs):
+ def f(_1):
+ return False
+ stubs.Set(vm_utils, '_is_vdi_pv', f)
+
+
+def stubout_loopingcall_start(stubs):
+ def fake_start(self, interval, now=True):
+ self.f(*self.args, **self.kw)
+ stubs.Set(utils.LoopingCall, 'start', fake_start)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
super(FakeSessionForVMTests, self).__init__(uri)
- def network_get_all_records_where(self, _1, _2):
- return self.xenapi.network.get_all_records()
-
def host_call_plugin(self, _1, _2, _3, _4, _5):
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
@@ -171,6 +182,31 @@ class FakeSessionForVMTests(fake.SessionBase):
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+ def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
+ pass
+
+
+def stub_out_vm_methods(stubs):
+ def fake_shutdown(self, inst, vm, method="clean"):
+ pass
+
+ def fake_acquire_bootlock(self, vm):
+ pass
+
+ def fake_release_bootlock(self, vm):
+ pass
+
+ def fake_spawn_rescue(self, inst):
+ inst._rescue = False
+
+ stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown)
+ stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
+ stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
+ stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue)
+
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
@@ -205,3 +241,63 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
def SR_forget(self, _1, ref):
pass
+
+
+class FakeSessionForMigrationTests(fake.SessionBase):
+ """Stubs out a XenAPISession for Migration tests"""
+ def __init__(self, uri):
+ super(FakeSessionForMigrationTests, self).__init__(uri)
+
+ def VDI_get_by_uuid(*args):
+ return 'hurr'
+
+ def VDI_resize_online(*args):
+ pass
+
+ def VM_start(self, _1, ref, _2, _3):
+ vm = fake.get_record('VM', ref)
+ if vm['power_state'] != 'Halted':
+ raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
+ vm['power_state']])
+ vm['power_state'] = 'Running'
+ vm['is_a_template'] = False
+ vm['is_control_domain'] = False
+
+
+def stub_out_migration_methods(stubs):
+ def fake_get_snapshot(self, instance):
+ return 'vm_ref', dict(image='foo', snap='bar')
+
+ @classmethod
+ def fake_get_vdi(cls, session, vm_ref):
+ vdi_ref = fake.create_vdi(name_label='derp', read_only=False,
+ sr_ref='herp', sharable=False)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, {'uuid': vdi_rec['uuid'], }
+
+ def fake_shutdown(self, inst, vm, hard=True):
+ pass
+
+ @classmethod
+ def fake_sr(cls, session, *args):
+ pass
+
+ @classmethod
+ def fake_get_sr_path(cls, *args):
+ return "fake"
+
+ def fake_destroy(*args, **kwargs):
+ pass
+
+ def fake_reset_network(*args, **kwargs):
+ pass
+
+ stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
+ stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
+ stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
+ stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot)
+ stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
+ stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None)
+ stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
+ stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network)
+ stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown)
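stubout_loopingcall_start above is what keeps the spawn tests deterministic: rather than scheduling the wrapped function repeatedly, the stubbed start() invokes it once, inline. The class below is a simplified stand-in for nova.utils.LoopingCall, just enough to show the effect of the stub:

    class LoopingCall(object):
        """Simplified stand-in for nova.utils.LoopingCall."""
        def __init__(self, f, *args, **kw):
            self.f, self.args, self.kw = f, args, kw

        def start(self, interval, now=True):
            raise NotImplementedError("real version repeats f on a timer")


    def fake_start(self, interval, now=True):
        self.f(*self.args, **self.kw)        # run exactly once, synchronously


    calls = []
    LoopingCall.start = fake_start           # what stubs.Set(...) does above
    LoopingCall(calls.append, 'ping').start(interval=0)
    assert calls == ['ping']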
diff --git a/nova/utils.py b/nova/utils.py
index 0cf91e0cc..3f6f9fc8a 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -23,10 +23,14 @@ System-level utilities and helper functions.
import base64
import datetime
+import functools
import inspect
import json
+import lockfile
+import netaddr
import os
import random
+import re
import socket
import string
import struct
@@ -34,20 +38,21 @@ import sys
import time
import types
from xml.sax import saxutils
-import re
-import netaddr
from eventlet import event
from eventlet import greenthread
+from eventlet import semaphore
from eventlet.green import subprocess
-
+
from nova import exception
from nova.exception import ProcessExecutionError
+from nova import flags
from nova import log as logging
LOG = logging.getLogger("nova.utils")
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+FLAGS = flags.FLAGS
def import_class(import_str):
@@ -125,40 +130,65 @@ def fetchfile(url, target):
# c.perform()
# c.close()
# fp.close()
- execute("curl --fail %s -o %s" % (url, target))
-
-
-def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
- LOG.debug(_("Running cmd (subprocess): %s"), cmd)
- env = os.environ.copy()
- if addl_env:
- env.update(addl_env)
- obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
- result = None
- if process_input != None:
- result = obj.communicate(process_input)
- else:
- result = obj.communicate()
- obj.stdin.close()
- if obj.returncode:
- LOG.debug(_("Result was %s") % obj.returncode)
- if check_exit_code and obj.returncode != 0:
- (stdout, stderr) = result
- raise ProcessExecutionError(exit_code=obj.returncode,
- stdout=stdout,
- stderr=stderr,
- cmd=cmd)
- # NOTE(termie): this appears to be necessary to let the subprocess call
- # clean something up in between calls, without it two
- # execute calls in a row hangs the second one
- greenthread.sleep(0)
- return result
+ execute("curl", "--fail", url, "-o", target)
+
+
+def execute(*cmd, **kwargs):
+ process_input = kwargs.pop('process_input', None)
+ addl_env = kwargs.pop('addl_env', None)
+ check_exit_code = kwargs.pop('check_exit_code', 0)
+ delay_on_retry = kwargs.pop('delay_on_retry', True)
+ attempts = kwargs.pop('attempts', 1)
+ if len(kwargs):
+ raise exception.Error(_('Got unknown keyword args '
+ 'to utils.execute: %r') % kwargs)
+ cmd = map(str, cmd)
+
+ while attempts > 0:
+ attempts -= 1
+ try:
+ LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
+ env = os.environ.copy()
+ if addl_env:
+ env.update(addl_env)
+ obj = subprocess.Popen(cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env)
+ result = None
+ if process_input != None:
+ result = obj.communicate(process_input)
+ else:
+ result = obj.communicate()
+ obj.stdin.close()
+ if obj.returncode:
+ LOG.debug(_("Result was %s") % obj.returncode)
+ if type(check_exit_code) == types.IntType \
+ and obj.returncode != check_exit_code:
+ (stdout, stderr) = result
+ raise ProcessExecutionError(exit_code=obj.returncode,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=' '.join(cmd))
+ return result
+ except ProcessExecutionError:
+ if not attempts:
+ raise
+ else:
+ LOG.debug(_("%r failed. Retrying."), cmd)
+ if delay_on_retry:
+ greenthread.sleep(random.randint(20, 200) / 100.0)
+ finally:
+ # NOTE(termie): this appears to be necessary to let the subprocess
+ # call clean something up in between calls, without
+ # it two execute calls in a row hangs the second one
+ greenthread.sleep(0)
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
- LOG.debug(_("Running cmd (SSH): %s"), cmd)
+ LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd))
if addl_env:
raise exception.Error("Environment not supported over SSH")
@@ -187,7 +217,7 @@ def ssh_execute(ssh, cmd, process_input=None,
raise exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
- cmd=cmd)
+ cmd=' '.join(cmd))
return (stdout, stderr)
@@ -220,9 +250,9 @@ def debug(arg):
return arg
-def runthis(prompt, cmd, check_exit_code=True):
- LOG.debug(_("Running %s"), (cmd))
- rv, err = execute(cmd, check_exit_code=check_exit_code)
+def runthis(prompt, *cmd, **kwargs):
+ LOG.debug(_("Running %s"), (" ".join(cmd)))
+ rv, err = execute(*cmd, **kwargs)
def generate_uid(topic, size=8):
@@ -239,13 +269,25 @@ def generate_mac():
return ':'.join(map(lambda x: "%02x" % x, mac))
-def generate_password(length=20):
- """Generate a random sequence of letters and digits
- to be used as a password. Note that this is not intended
- to represent the ultimate in security.
+# Default symbols to use for passwords. Avoids visually confusing characters.
+# ~6 bits per symbol
+DEFAULT_PASSWORD_SYMBOLS = ("23456789" # Removed: 0,1
+ "ABCDEFGHJKLMNPQRSTUVWXYZ" # Removed: I, O
+ "abcdefghijkmnopqrstuvwxyz") # Removed: l
+
+
+# ~5 bits per symbol
+EASIER_PASSWORD_SYMBOLS = ("23456789" # Removed: 0, 1
+ "ABCDEFGHJKLMNPQRSTUVWXYZ") # Removed: I, O
+
+
+def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
+ """Generate a random password from the supplied symbols.
+
+ Believed to be reasonably secure (with a reasonable password length!)
"""
- chrs = string.letters + string.digits
- return "".join([random.choice(chrs) for i in xrange(length)])
+ r = random.SystemRandom()
+ return "".join([r.choice(symbols) for _i in xrange(length)])
def last_octet(address):
@@ -254,7 +296,7 @@ def last_octet(address):
def get_my_linklocal(interface):
try:
- if_str = execute("ip -f inet6 -o addr show %s" % interface)
+ if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface)
condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
@@ -269,11 +311,15 @@ def get_my_linklocal(interface):
def to_global_ipv6(prefix, mac):
- mac64 = netaddr.EUI(mac).eui64().words
- int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
- mac64_addr = netaddr.IPAddress(int_addr)
- maskIP = netaddr.IPNetwork(prefix).ip
- return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).format()
+ try:
+ mac64 = netaddr.EUI(mac).eui64().words
+ int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
+ mac64_addr = netaddr.IPAddress(int_addr)
+ maskIP = netaddr.IPNetwork(prefix).ip
+ return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
+ format()
+ except TypeError:
+ raise TypeError(_("Bad mac for to_global_ipv6: %s") % mac)
def to_mac(ipv6_address):
@@ -294,6 +340,11 @@ def utcnow():
utcnow.override_time = None
+def is_older_than(before, seconds):
+ """Return True if before is older than seconds"""
+ return utcnow() - before > datetime.timedelta(seconds=seconds)
+
+
def utcnow_ts():
"""Timestamp version of our utcnow function."""
return time.mktime(utcnow().timetuple())
@@ -491,16 +542,78 @@ def loads(s):
return json.loads(s)
-def ensure_b64_encoding(val):
- """Safety method to ensure that values expected to be base64-encoded
- actually are. If they are, the value is returned unchanged. Otherwise,
- the encoded value is returned.
+_semaphores = {}
+
+
+class _NoopContextManager(object):
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+
+def synchronized(name, external=False):
+ """Synchronization decorator
+
+ Decorating a method like so:
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ ensures that only one thread will execute the foo method at a time.
+
+ Different methods can share the same lock:
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ @synchronized('mylock')
+ def bar(self, *args):
+ ...
+
+ This way only one of either foo or bar can be executing at a time.
+
+ The external keyword argument denotes whether this lock should work across
+ multiple processes. This means that if two different workers both run a
+ method decorated with @synchronized('mylock', external=True), only one
+ of them will execute at a time.
"""
- try:
- dummy = base64.decode(val)
- return val
- except TypeError:
- return base64.b64encode(val)
+
+ def wrap(f):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ # NOTE(soren): If we ever go natively threaded, this will be racy.
+ # See http://stackoverflow.com/questions/5390569/dyn\
+ # amically-allocating-and-destroying-mutexes
+ if name not in _semaphores:
+ _semaphores[name] = semaphore.Semaphore()
+ sem = _semaphores[name]
+ LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
+ '"%(method)s"...' % {"lock": name,
+ "method": f.__name__}))
+ with sem:
+ if external:
+ LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
+ 'method "%(method)s"...' %
+ {"lock": name, "method": f.__name__}))
+ lock_file_path = os.path.join(FLAGS.lock_path,
+ 'nova-%s.lock' % name)
+ lock = lockfile.FileLock(lock_file_path)
+ else:
+ lock = _NoopContextManager()
+
+ with lock:
+ retval = f(*args, **kwargs)
+
+ # If no-one else is waiting for it, delete it.
+ # See note about possible raciness above.
+ if not sem.balance < 1:
+ del _semaphores[name]
+
+ return retval
+ return inner
+ return wrap
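A hedged usage sketch (the lock name and function below are illustrative; the external case assumes FLAGS.lock_path points at a writable directory):

    from nova import utils

    @utils.synchronized('mylock', external=True)
    def refresh_rules():
        # Serialized across green threads by the module-level semaphore,
        # and across worker processes by the file lock
        # FLAGS.lock_path/nova-mylock.lock.
        pass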
def get_from_path(items, path):
@@ -550,3 +663,54 @@ def get_from_path(items, path):
return results
else:
return get_from_path(results, remainder)
+
+
+def flatten_dict(dict_, flattened=None):
+ """Recursively flatten a nested dictionary"""
+ flattened = flattened or {}
+ for key, value in dict_.iteritems():
+ if hasattr(value, 'iteritems'):
+ flatten_dict(value, flattened)
+ else:
+ flattened[key] = value
+ return flattened
+
+
+def partition_dict(dict_, keys):
+ """Return two dicts, one containing only `keys` the other containing
+ everything but `keys`
+ """
+ intersection = {}
+ difference = {}
+ for key, value in dict_.iteritems():
+ if key in keys:
+ intersection[key] = value
+ else:
+ difference[key] = value
+ return intersection, difference
+
+
+def map_dict_keys(dict_, key_map):
+ """Return a dictionary in which the dictionaries keys are mapped to
+ new keys.
+ """
+ mapped = {}
+ for key, value in dict_.iteritems():
+ mapped_key = key_map[key] if key in key_map else key
+ mapped[mapped_key] = value
+ return mapped
+
+
+def subset_dict(dict_, keys):
+ """Return a dict that only contains a subset of keys"""
+ subset = partition_dict(dict_, keys)[0]
+ return subset
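A small worked example of the dict helpers above (the dict itself is illustrative):

    data = {'name': 'vm-1', 'state': 'running', 'host': 'node1'}

    meta, rest = partition_dict(data, ['name', 'state'])
    # meta == {'name': 'vm-1', 'state': 'running'}, rest == {'host': 'node1'}

    renamed = map_dict_keys(meta, {'state': 'power_state'})
    # renamed == {'name': 'vm-1', 'power_state': 'running'}

    subset_dict(data, ['host'])
    # == {'host': 'node1'}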
+
+
+def check_isinstance(obj, cls):
+ """Checks that obj is of type cls, and lets PyLint infer types"""
+ if isinstance(obj, cls):
+ return obj
+ raise Exception(_("Expected object of type: %s") % (str(cls)))
+ # TODO(justinsb): Can we make this better??
+ return cls() # Ugly PyLint hack
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 13181b730..99a8849f1 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -23,10 +23,13 @@ import sys
from nova import flags
from nova import log as logging
+from nova import utils
+from nova.virt import driver
from nova.virt import fake
+from nova.virt import hyperv
from nova.virt import libvirt_conn
+from nova.virt import vmwareapi_conn
from nova.virt import xenapi_conn
-from nova.virt import hyperv
LOG = logging.getLogger("nova.virt.connection")
@@ -66,10 +69,12 @@ def get_connection(read_only=False):
conn = xenapi_conn.get_connection(read_only)
elif t == 'hyperv':
conn = hyperv.get_connection(read_only)
+ elif t == 'vmwareapi':
+ conn = vmwareapi_conn.get_connection(read_only)
else:
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
LOG.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
- return conn
+ return utils.check_isinstance(conn, driver.ComputeDriver)
diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template
new file mode 100644
index 000000000..48842b29d
--- /dev/null
+++ b/nova/virt/cpuinfo.xml.template
@@ -0,0 +1,9 @@
+<cpu>
+ <arch>$arch</arch>
+ <model>$model</model>
+ <vendor>$vendor</vendor>
+ <topology sockets="$topology.sockets" cores="$topology.cores" threads="$topology.threads"/>
+#for $var in $features
+ <features name="$var" />
+#end for
+</cpu>
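For reference, a sketch of how this Cheetah template can be rendered (the values below are made up; the real caller feeds it the cpu_info gathered by the libvirt driver):

    from Cheetah.Template import Template

    cpu_info = {'arch': 'x86_64', 'model': 'Nehalem', 'vendor': 'Intel',
                'topology': {'sockets': 1, 'cores': 4, 'threads': 2},
                'features': ['vmx', 'sse4.2']}
    xml = str(Template(open('nova/virt/cpuinfo.xml.template').read(),
                       searchList=[cpu_info]))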
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index cb639a102..ddea1a1f7 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -26,6 +26,8 @@ import os
import tempfile
import time
+from nova import context
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -38,8 +40,13 @@ flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
+flags.DEFINE_string('injected_network_template',
+ utils.abspath('virt/interfaces.template'),
+ 'Template file for injected network')
flags.DEFINE_integer('timeout_nbd', 10,
'time to wait for a NBD device coming up')
+flags.DEFINE_integer('max_nbd_devices', 16,
+ 'maximum number of possible nbd devices')
def extend(image, size):
@@ -47,10 +54,10 @@ def extend(image, size):
file_size = os.path.getsize(image)
if file_size >= size:
return
- utils.execute('truncate -s %s %s' % (size, image))
+ utils.execute('truncate', '-s', size, image)
# NOTE(vish): attempts to resize filesystem
- utils.execute('e2fsck -fp %s' % image, check_exit_code=False)
- utils.execute('resize2fs %s' % image, check_exit_code=False)
+ utils.execute('e2fsck', '-fp', image, check_exit_code=False)
+ utils.execute('resize2fs', image, check_exit_code=False)
def inject_data(image, key=None, net=None, partition=None, nbd=False):
@@ -66,7 +73,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
try:
if not partition is None:
# create partition
- out, err = utils.execute('sudo kpartx -a %s' % device)
+ out, err = utils.execute('sudo', 'kpartx', '-a', device)
if err:
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
@@ -82,41 +89,73 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
- out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
+ out, err = utils.execute('sudo', 'tune2fs',
+ '-c', 0, '-i', 0, mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = utils.execute(
- 'sudo mount %s %s' % (mapped_device, tmpdir))
+ 'sudo', 'mount', mapped_device, tmpdir)
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
try:
- if key:
- # inject key file
- _inject_key_into_fs(key, tmpdir)
- if net:
- _inject_net_into_fs(net, tmpdir)
+ inject_data_into_fs(tmpdir, key, net, utils.execute)
finally:
# unmount device
- utils.execute('sudo umount %s' % mapped_device)
+ utils.execute('sudo', 'umount', mapped_device)
finally:
# remove temporary directory
- utils.execute('rmdir %s' % tmpdir)
+ utils.execute('rmdir', tmpdir)
if not partition is None:
# remove partitions
- utils.execute('sudo kpartx -d %s' % device)
+ utils.execute('sudo', 'kpartx', '-d', device)
finally:
_unlink_device(device, nbd)
+def setup_container(image, container_dir=None, nbd=False):
+ """Setup the LXC container.
+
+ It will mount the loopback image to the container directory in order
+ to create the root filesystem for the container.
+
+ LXC does not support qcow2 images yet.
+ """
+ try:
+ device = _link_device(image, nbd)
+ utils.execute('sudo', 'mount', device, container_dir)
+ except Exception, exn:
+ LOG.exception(_('Failed to mount filesystem: %s'), exn)
+ _unlink_device(device, nbd)
+
+
+def destroy_container(target, instance, nbd=False):
+ """Destroy the container once it terminates.
+
+ It will unmount the mounted container, try to find the loopback
+ device associated with the container, and delete it.
+
+ LXC does not support qcow2 images yet.
+ """
+ try:
+ container_dir = '%s/rootfs' % target
+ utils.execute('sudo', 'umount', container_dir)
+ finally:
+ out, err = utils.execute('sudo', 'losetup', '-a')
+ for loop in out.splitlines():
+ if instance['name'] in loop:
+ device = loop.split(':')[0]
+ _unlink_device(device, nbd)
+
+
def _link_device(image, nbd):
"""Link image to device using loopback or nbd"""
if nbd:
device = _allocate_device()
- utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
+ utils.execute('sudo', 'qemu-nbd', '-c', device, image)
# NOTE(vish): this forks into another process, so give it a chance
# to set up before continuing
for i in xrange(FLAGS.timeout_nbd):
@@ -125,7 +164,7 @@ def _link_device(image, nbd):
time.sleep(1)
raise exception.Error(_('nbd device %s did not show up') % device)
else:
- out, err = utils.execute('sudo losetup --find --show %s' % image)
+ out, err = utils.execute('sudo', 'losetup', '--find', '--show', image)
if err:
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
@@ -135,13 +174,13 @@ def _link_device(image, nbd):
def _unlink_device(device, nbd):
"""Unlink image from device using loopback or nbd"""
if nbd:
- utils.execute('sudo qemu-nbd -d %s' % device)
+ utils.execute('sudo', 'qemu-nbd', '-d', device)
_free_device(device)
else:
- utils.execute('sudo losetup --detach %s' % device)
+ utils.execute('sudo', 'losetup', '--detach', device)
-_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)]
+_DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
def _allocate_device():
@@ -161,28 +200,40 @@ def _free_device(device):
_DEVICES.append(device)
-def _inject_key_into_fs(key, fs):
+def inject_data_into_fs(fs, key, net, execute):
+ """Injects data into a filesystem already mounted by the caller.
+ Virt connections can call this directly if they mount their fs
+ in a different way than inject_data does.
+ """
+ if key:
+ _inject_key_into_fs(key, fs, execute=execute)
+ if net:
+ _inject_net_into_fs(net, fs, execute=execute)
+
+
+def _inject_key_into_fs(key, fs, execute=None):
"""Add the given public ssh key to root's authorized_keys.
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(fs, 'root', '.ssh')
- utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
- utils.execute('sudo chown root %s' % sshdir)
- utils.execute('sudo chmod 700 %s' % sshdir)
+ utils.execute('sudo', 'mkdir', '-p', sshdir) # existing dir doesn't matter
+ utils.execute('sudo', 'chown', 'root', sshdir)
+ utils.execute('sudo', 'chmod', '700', sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+ utils.execute('sudo', 'tee', '-a', keyfile,
+ process_input='\n' + key.strip() + '\n')
-def _inject_net_into_fs(net, fs):
+def _inject_net_into_fs(net, fs, execute=None):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
- utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
- utils.execute('sudo chown root:root %s' % netdir)
- utils.execute('sudo chmod 755 %s' % netdir)
+ utils.execute('sudo', 'mkdir', '-p', netdir) # existing dir doesn't matter
+ utils.execute('sudo', 'chown', 'root:root', netdir)
+ utils.execute('sudo', 'chmod', 755, netdir)
netfile = os.path.join(netdir, 'interfaces')
- utils.execute('sudo tee %s' % netfile, net)
+ utils.execute('sudo', 'tee', netfile, process_input=net)
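A sketch of the new direct-call path for virt drivers that mount the guest filesystem themselves (the mount point and key below are illustrative):

    from nova import utils
    from nova.virt import disk

    # Filesystem already mounted at mount_point by the caller.
    mount_point = '/mnt/instance-rootfs'
    disk.inject_data_into_fs(mount_point,
                             key='ssh-rsa AAAA... user@host',
                             net=None,
                             execute=utils.execute)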
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
new file mode 100644
index 000000000..eb9626d08
--- /dev/null
+++ b/nova/virt/driver.py
@@ -0,0 +1,243 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Driver base-classes:
+
+ (Beginning of) the contract that compute drivers must follow, and shared
+ types that support that contract
+"""
+
+from nova.compute import power_state
+
+
+class InstanceInfo(object):
+ def __init__(self, name, state):
+ self.name = name
+ assert state in power_state.valid_states(), "Bad state: %s" % state
+ self.state = state
+
+
+class ComputeDriver(object):
+ """Base class for compute drivers.
+
+ Lots of documentation is currently on fake.py.
+ """
+
+ def init_host(self, host):
+ """Adopt existing VM's running here"""
+ raise NotImplementedError()
+
+ def get_info(self, instance_name):
+ """Get the current status of an instance, by name (not ID!)
+
+ Returns a dict containing:
+ :state: the running state, one of the power_state codes
+ :max_mem: (int) the maximum memory in KBytes allowed
+ :mem: (int) the memory in KBytes used by the domain
+ :num_cpu: (int) the number of virtual CPUs for the domain
+ :cpu_time: (int) the CPU time used in nanoseconds
+ """
+ raise NotImplementedError()
+
+ def list_instances(self):
+ raise NotImplementedError()
+
+ def list_instances_detail(self):
+ """Return a list of InstanceInfo for all registered VMs"""
+ raise NotImplementedError()
+
+ def spawn(self, instance, network_info=None):
+ """Launch a VM for the specified instance"""
+ raise NotImplementedError()
+
+ def destroy(self, instance, cleanup=True):
+ """Destroy (shutdown and delete) the specified instance.
+
+ The given parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name.
+
+ The work will be done asynchronously. This function returns a
+ task that allows the caller to detect when it is complete.
+
+ If the instance is not found (for example if networking failed), this
+ function should still succeed. It's probably a good idea to log a
+ warning in that case.
+
+ """
+ raise NotImplementedError()
+
+ def reboot(self, instance):
+ """Reboot specified VM"""
+ raise NotImplementedError()
+
+ def snapshot_instance(self, context, instance_id, image_id):
+ raise NotImplementedError()
+
+ def get_console_pool_info(self, console_type):
+ raise NotImplementedError()
+
+ def get_console_output(self, instance):
+ raise NotImplementedError()
+
+ def get_ajax_console(self, instance):
+ raise NotImplementedError()
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics"""
+ raise NotImplementedError()
+
+ def get_host_ip_addr(self):
+ raise NotImplementedError()
+
+ def attach_volume(self, context, instance_id, volume_id, mountpoint):
+ raise NotImplementedError()
+
+ def detach_volume(self, context, instance_id, volume_id):
+ raise NotImplementedError()
+
+ def compare_cpu(self, context, cpu_info):
+ raise NotImplementedError()
+
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Transfers the VHD of a running instance to another host, then shuts
+ off the instance copies over the COW disk"""
+ raise NotImplementedError()
+
+ def snapshot(self, instance, image_id):
+ """Create snapshot from a running VM instance."""
+ raise NotImplementedError()
+
+ def finish_resize(self, instance, disk_info):
+ """Completes a resize, turning on the migrated instance"""
+ raise NotImplementedError()
+
+ def revert_resize(self, instance):
+ """Reverts a resize, powering back on the instance"""
+ raise NotImplementedError()
+
+ def pause(self, instance, callback):
+ """Pause VM instance"""
+ raise NotImplementedError()
+
+ def unpause(self, instance, callback):
+ """Unpause paused VM instance"""
+ raise NotImplementedError()
+
+ def suspend(self, instance, callback):
+ """suspend the specified instance"""
+ raise NotImplementedError()
+
+ def resume(self, instance, callback):
+ """resume the specified instance"""
+ raise NotImplementedError()
+
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ raise NotImplementedError()
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ raise NotImplementedError()
+
+ def update_available_resource(self, ctxt, host):
+ """Updates compute manager resource info on ComputeNode table.
+
+ This method is called when nova-compute launches, and
+ whenever the admin executes "nova-manage service update_resource".
+
+ :param ctxt: security context
+ :param host: hostname that compute manager is currently running
+
+ """
+ raise NotImplementedError()
+
+ def live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Spawning live_migration operation for distributing high-load.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ raise NotImplementedError()
+
+ def reset_network(self, instance):
+ """reset networking for specified instance"""
+ raise NotImplementedError()
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """Setting up filtering rules and waiting for its completion.
+
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+ ( Waiting only for filtering rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+ so , no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed.
+
+ :params instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+ raise NotImplementedError()
+
+ def unfilter_instance(self, instance):
+ """Stop filtering instance"""
+ raise NotImplementedError()
+
+ def set_admin_password(self, context, instance_id, new_pass=None):
+ """Set the root/admin password for an instance on this server."""
+ raise NotImplementedError()
+
+ def inject_file(self, instance, b64_path, b64_contents):
+ """Create a file on the VM instance. The file path and contents
+ should be base64-encoded.
+ """
+ raise NotImplementedError()
+
+ def inject_network_info(self, instance):
+ """inject network info for specified instance"""
+ raise NotImplementedError()
+
+ def poll_rescued_instances(self, timeout):
+ """Poll for rescued instances"""
+ raise NotImplementedError()
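As a minimal sketch (not part of the patch) of what a conforming driver looks like against this contract:

    from nova.compute import power_state
    from nova.virt import driver

    class NoopDriver(driver.ComputeDriver):
        def list_instances(self):
            return ['instance-00000001']

        def get_info(self, instance_name):
            # Shape documented in ComputeDriver.get_info above.
            return {'state': power_state.RUNNING,
                    'max_mem': 2048 * 1024,
                    'mem': 1024 * 1024,
                    'num_cpu': 1,
                    'cpu_time': 0}

Operations the subclass does not override fall through to the base class and raise NotImplementedError at call time, while utils.check_isinstance in nova/virt/connection.py catches drivers that do not subclass ComputeDriver at startup.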
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 4346dffc1..c3d5230df 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -26,7 +26,13 @@ semantics of real hypervisor connections.
"""
from nova import exception
+from nova import log as logging
+from nova import utils
from nova.compute import power_state
+from nova.virt import driver
+
+
+LOG = logging.getLogger('nova.compute.disk')
def get_connection(_):
@@ -34,7 +40,14 @@ def get_connection(_):
return FakeConnection.instance()
-class FakeConnection(object):
+class FakeInstance(object):
+
+ def __init__(self, name, state):
+ self.name = name
+ self.state = state
+
+
+class FakeConnection(driver.ComputeDriver):
"""
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
@@ -90,6 +103,17 @@ class FakeConnection(object):
"""
return self.instances.keys()
+ def _map_to_instance_info(self, instance):
+ instance = utils.check_isinstance(instance, FakeInstance)
+ info = driver.InstanceInfo(instance.name, instance.state)
+ return info
+
+ def list_instances_detail(self):
+ info_list = []
+ for instance in self.instances.values():
+ info_list.append(self._map_to_instance_info(instance))
+ return info_list
+
def spawn(self, instance):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -109,9 +133,10 @@ class FakeConnection(object):
that it was before this call began.
"""
- fake_instance = FakeInstance()
- self.instances[instance.name] = fake_instance
- fake_instance._state = power_state.RUNNING
+ name = instance.name
+ state = power_state.RUNNING
+ fake_instance = FakeInstance(name, state)
+ self.instances[name] = fake_instance
def snapshot(self, instance, name):
"""
@@ -139,6 +164,24 @@ class FakeConnection(object):
"""
pass
+ def get_host_ip_addr(self):
+ """
+ Retrieves the IP address of the dom0
+ """
+ pass
+
+ def resize(self, instance, flavor):
+ """
+ Resizes/Migrates the specified instance.
+
+ The flavor parameter determines whether or not the instance RAM and
+ disk space are modified, and if so, to what size.
+
+ The work will be done asynchronously. This function returns a task
+ that allows the caller to detect when it is complete.
+ """
+ pass
+
def set_admin_password(self, instance, new_pass):
"""
Set the root password on the specified instance.
@@ -179,6 +222,19 @@ class FakeConnection(object):
"""
pass
+ def migrate_disk_and_power_off(self, instance, dest):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ pass
+
+ def attach_disk(self, instance, disk_info):
+ """
+ Attaches the disk to an instance given the metadata disk_info
+ """
+ pass
+
def pause(self, instance, callback):
"""
Pause the specified instance.
@@ -204,16 +260,12 @@ class FakeConnection(object):
pass
def destroy(self, instance):
- """
- Destroy (shutdown and delete) the specified instance.
-
- The given parameter is an instance of nova.compute.service.Instance,
- and so the instance is being specified as instance.name.
-
- The work will be done asynchronously. This function returns a
- task that allows the caller to detect when it is complete.
- """
- del self.instances[instance.name]
+ key = instance.name
+ if key in self.instances:
+ del self.instances[key]
+ else:
+ LOG.warning("Key '%s' not in instances '%s'" %
+ (key, self.instances))
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach the disk at device_path to the instance at mountpoint"""
@@ -239,7 +291,7 @@ class FakeConnection(object):
raise exception.NotFound(_("Instance %s Not Found")
% instance_name)
i = self.instances[instance_name]
- return {'state': i._state,
+ return {'state': i.state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
@@ -292,7 +344,7 @@ class FakeConnection(object):
Note that this function takes an instance ID, not a
compute.service.Instance, so that it can be called by compute.monitor.
"""
- return [0L, 0L, 0L, 0L, null]
+ return [0L, 0L, 0L, 0L, None]
def interface_stats(self, instance_name, iface_id):
"""
@@ -323,6 +375,11 @@ class FakeConnection(object):
'host': 'fakeajaxconsole.com',
'port': 6969}
+ def get_vnc_console(self, instance):
+ return {'token': 'FAKETOKEN',
+ 'host': 'fakevncconsole.com',
+ 'port': 6969}
+
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
@@ -376,8 +433,27 @@ class FakeConnection(object):
"""
return True
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
-class FakeInstance(object):
+ def compare_cpu(self, xml):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
- def __init__(self):
- self._state = power_state.NOSTATE
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only by libvirt."""
+ return
+
+ def unfilter_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def test_remove_vm(self, instance_name):
+ """ Removes the named VM, as if it crashed. For testing"""
+ self.instances.pop(instance_name)
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 29d18dac5..13f403a66 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -68,6 +68,7 @@ from nova import flags
from nova import log as logging
from nova.auth import manager
from nova.compute import power_state
+from nova.virt import driver
from nova.virt import images
wmi = None
@@ -108,8 +109,9 @@ def get_connection(_):
return HyperVConnection()
-class HyperVConnection(object):
+class HyperVConnection(driver.ComputeDriver):
def __init__(self):
+ super(HyperVConnection, self).__init__()
self._conn = wmi.WMI(moniker='//./root/virtualization')
self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
@@ -124,6 +126,19 @@ class HyperVConnection(object):
for v in self._conn.Msvm_ComputerSystem(['ElementName'])]
return vms
+ def list_instances_detail(self):
+ # TODO(justinsb): This is a terrible implementation (1+N)
+ instance_infos = []
+ for instance_name in self.list_instances():
+ info = self.get_info(instance_name)
+
+ state = info['state']
+
+ instance_info = driver.InstanceInfo(instance_name, state)
+ instance_infos.append(instance_info)
+
+ return instance_infos
+
def spawn(self, instance):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
@@ -345,7 +360,7 @@ class HyperVConnection(object):
newinst = cl.new()
#Copy the properties from the original.
for prop in wmi_obj._properties:
- newinst.Properties_.Item(prop).Value =\
+ newinst.Properties_.Item(prop).Value = \
wmi_obj.Properties_.Item(prop).Value
return newinst
@@ -467,3 +482,10 @@ class HyperVConnection(object):
if vm is None:
raise exception.NotFound('Cannot detach volume from missing %s '
% instance_name)
+
+ def poll_rescued_instances(self, timeout):
+ pass
+
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 7a6fef330..2e3f2ee4d 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -28,29 +28,32 @@ import time
import urllib2
import urlparse
+from nova import context
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
-from nova.objectstore import image
FLAGS = flags.FLAGS
-flags.DEFINE_bool('use_s3', True,
- 'whether to get images from s3 or use local copy')
-
LOG = logging.getLogger('nova.virt.images')
-def fetch(image, path, user, project):
- if FLAGS.use_s3:
- f = _fetch_s3_image
- else:
- f = _fetch_local_image
- return f(image, path, user, project)
+def fetch(image_id, path, _user, _project):
+ # TODO(vish): Improve context handling and add owner and auth data
+ # when it is added to glance. Right now there is no
+ # auth checking in glance, so we assume that access was
+ # checked before we got here.
+ image_service = utils.import_object(FLAGS.image_service)
+ with open(path, "wb") as image_file:
+ elevated = context.get_admin_context()
+ metadata = image_service.get(elevated, image_id, image_file)
+ return metadata
+# NOTE(vish): The methods below should be unnecessary, but I'm leaving
+# them in case the glance client does not work on windows.
def _fetch_image_no_curl(url, path, headers):
request = urllib2.Request(url)
for (k, v) in headers.iteritems():
@@ -94,8 +97,7 @@ def _fetch_s3_image(image, path, user, project):
cmd += ['-H', '\'%s: %s\'' % (k, v)]
cmd += ['-o', path]
- cmd_out = ' '.join(cmd)
- return utils.execute(cmd_out)
+ return utils.execute(*cmd)
def _fetch_local_image(image, path, user, project):
@@ -103,13 +105,15 @@ def _fetch_local_image(image, path, user, project):
if sys.platform.startswith('win'):
return shutil.copy(source, path)
else:
- return utils.execute('cp %s %s' % (source, path))
+ return utils.execute('cp', source, path)
def _image_path(path):
return os.path.join(FLAGS.images_path, path)
+# TODO(vish): xenapi should use the glance client code directly instead
+# of retrieving the image using this method.
def image_url(image):
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
return "http://%s:%s/images/%s" % (FLAGS.glance_host,
diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template
index 87b92b84a..e527cf35c 100644
--- a/nova/virt/interfaces.template
+++ b/nova/virt/interfaces.template
@@ -5,13 +5,20 @@
auto lo
iface lo inet loopback
-# The primary network interface
-auto eth0
-iface eth0 inet static
- address %(address)s
- netmask %(netmask)s
- broadcast %(broadcast)s
- gateway %(gateway)s
- dns-nameservers %(dns)s
+#for $ifc in $interfaces
+auto ${ifc.name}
+iface ${ifc.name} inet static
+ address ${ifc.address}
+ netmask ${ifc.netmask}
+ broadcast ${ifc.broadcast}
+ gateway ${ifc.gateway}
+ dns-nameservers ${ifc.dns}
+#if $use_ipv6
+iface ${ifc.name} inet6 static
+ address ${ifc.address_v6}
+ netmask ${ifc.netmask_v6}
+ gateway ${ifc.gateway_v6}
+#end if
+#end for
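The libvirt driver renders this template with Template(ifc_template, searchList=[{'interfaces': nets, 'use_ipv6': FLAGS.use_ipv6}]) (see the _create_image changes later in this diff); a standalone sketch with made-up values:

    from Cheetah.Template import Template

    ifc = {'name': 'eth0', 'address': '10.0.0.2', 'netmask': '255.255.255.0',
           'broadcast': '10.0.0.255', 'gateway': '10.0.0.1', 'dns': '10.0.0.1',
           'address_v6': None, 'gateway_v6': None, 'netmask_v6': None}
    text = str(Template(open('nova/virt/interfaces.template').read(),
                        searchList=[{'interfaces': [ifc], 'use_ipv6': False}]))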
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 88bfbc668..de2497a76 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -2,7 +2,12 @@
<name>${name}</name>
<memory>${memory_kb}</memory>
<os>
-#if $type == 'uml'
+#if $type == 'lxc'
+ #set $disk_prefix = ''
+ #set $disk_bus = ''
+ <type>exe</type>
+ <init>/sbin/init</init>
+#else if $type == 'uml'
#set $disk_prefix = 'ubd'
#set $disk_bus = 'uml'
<type>uml</type>
@@ -44,7 +49,13 @@
</features>
<vcpu>${vcpus}</vcpu>
<devices>
-#if $getVar('rescue', False)
+#if $type == 'lxc'
+ <filesystem type='mount'>
+ <source dir='${basepath}/rootfs'/>
+ <target dir='/'/>
+ </filesystem>
+#else
+ #if $getVar('rescue', False)
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk.rescue'/>
@@ -55,36 +66,39 @@
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
-#else
+ #else
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
- #if $getVar('local', False)
- <disk type='file'>
- <driver type='${driver_type}'/>
- <source file='${basepath}/disk.local'/>
- <target dev='${disk_prefix}b' bus='${disk_bus}'/>
- </disk>
+ #if $getVar('local', False)
+ <disk type='file'>
+ <driver type='${driver_type}'/>
+ <source file='${basepath}/disk.local'/>
+ <target dev='${disk_prefix}b' bus='${disk_bus}'/>
+ </disk>
+ #end if
#end if
#end if
+
+#for $nic in $nics
<interface type='bridge'>
- <source bridge='${bridge_name}'/>
- <mac address='${mac_address}'/>
+ <source bridge='${nic.bridge_name}'/>
+ <mac address='${nic.mac_address}'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
- <filterref filter="nova-instance-${name}">
- <parameter name="IP" value="${ip_address}" />
- <parameter name="DHCPSERVER" value="${dhcp_server}" />
-#if $getVar('extra_params', False)
- ${extra_params}
+ <filterref filter="nova-instance-${name}-${nic.id}">
+ <parameter name="IP" value="${nic.ip_address}" />
+ <parameter name="DHCPSERVER" value="${nic.dhcp_server}" />
+#if $getVar('nic.extra_params', False)
+ ${nic.extra_params}
#end if
-#if $getVar('ra_server', False)
- <parameter name="RASERVER" value="${ra_server}" />
+#if $getVar('nic.gateway_v6', False)
+ <parameter name="RASERVER" value="${nic.gateway_v6}" />
#end if
</filterref>
</interface>
-
+#end for
<!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='${basepath}/console.log'/>
@@ -101,5 +115,8 @@
<target port='0'/>
</serial>
+#if $getVar('vncserver_host', False)
+ <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='${vncserver_host}'/>
+#end if
</devices>
</domain>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 4e0fd106f..2be190256 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -20,7 +20,7 @@
"""
A connection to a hypervisor through libvirt.
-Supports KVM, QEMU, UML, and XEN.
+Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
@@ -36,16 +36,19 @@ Supports KVM, QEMU, UML, and XEN.
"""
+import multiprocessing
import os
-import shutil
import random
+import shutil
import subprocess
+import sys
+import tempfile
+import time
import uuid
from xml.dom import minidom
-
+from xml.etree import ElementTree
from eventlet import greenthread
-from eventlet import event
from eventlet import tpool
import IPy
@@ -55,12 +58,14 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+#from nova import test
from nova import utils
-#from nova.api import context
+from nova import vnc
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
+from nova.virt import driver
from nova.virt import images
libvirt = None
@@ -70,20 +75,19 @@ Template = None
LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
+flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
-flags.DEFINE_string('injected_network_template',
- utils.abspath('virt/interfaces.template'),
- 'Template file for injected network')
+
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
flags.DEFINE_string('libvirt_type',
'kvm',
'Libvirt domain type (valid options are: '
- 'kvm, qemu, uml, xen)')
+ 'kvm, lxc, qemu, uml, xen)')
flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
@@ -100,6 +104,19 @@ flags.DEFINE_string('ajaxterm_portrange',
flags.DEFINE_string('firewall_driver',
'nova.virt.libvirt_conn.IptablesFirewallDriver',
'Firewall driver (defaults to iptables)')
+flags.DEFINE_string('cpuinfo_xml_template',
+ utils.abspath('virt/cpuinfo.xml.template'),
+ 'CpuInfo XML Template (Used only live migration now)')
+flags.DEFINE_string('live_migration_uri',
+ "qemu+tcp://%s/system",
+ 'Define protocol used by live_migration feature')
+flags.DEFINE_string('live_migration_flag',
+ "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
+ 'Define live migration behavior.')
+flags.DEFINE_integer('live_migration_bandwidth', 0,
+ 'Define live migration behavior')
+flags.DEFINE_string('qemu_img', 'qemu-img',
+ 'binary to use for qemu-img commands')
def get_connection(read_only):
@@ -120,8 +137,8 @@ def get_connection(read_only):
def _late_load_cheetah():
global Template
if Template is None:
- t = __import__('Cheetah.Template', globals(), locals(), ['Template'],
- -1)
+ t = __import__('Cheetah.Template', globals(), locals(),
+ ['Template'], -1)
Template = t.Template
@@ -140,12 +157,60 @@ def _get_ip_version(cidr):
return int(net.version())
-class LibvirtConnection(object):
+def _get_network_info(instance):
+ # TODO(adiantum) If we will keep this function
+ # we should cache network_info
+ admin_context = context.get_admin_context()
+
+ ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
+ instance['id'])
+
+ networks = db.network_get_all_by_instance(admin_context,
+ instance['id'])
+ network_info = []
+
+ def ip_dict(ip):
+ return {
+ "ip": ip.address,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict(ip6):
+ prefix = ip6.network.cidr_v6
+ mac = instance.mac_address
+ return {
+ "ip": utils.to_global_ipv6(prefix, mac),
+ "netmask": ip6.network.netmask_v6,
+ "gateway": ip6.network.gateway_v6,
+ "enabled": "1"}
+
+ for network in networks:
+ network_ips = [ip for ip in ip_addresses
+ if ip.network_id == network.id]
+
+ mapping = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance.mac_address,
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_ips]}
+
+ if FLAGS.use_ipv6:
+ mapping['ip6s'] = [ip6_dict(ip) for ip in network_ips]
+
+ network_info.append((network, mapping))
+ return network_info
+
+
+class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
+ super(LibvirtConnection, self).__init__()
self.libvirt_uri = self.get_uri()
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
+ self.interfaces_xml = open(FLAGS.injected_network_template).read()
+ self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read()
self._wrapped_conn = None
self.read_only = read_only
@@ -202,6 +267,8 @@ class LibvirtConnection(object):
uri = FLAGS.libvirt_uri or 'uml:///system'
elif FLAGS.libvirt_type == 'xen':
uri = FLAGS.libvirt_uri or 'xen:///'
+ elif FLAGS.libvirt_type == 'lxc':
+ uri = FLAGS.libvirt_uri or 'lxc:///'
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
return uri
@@ -220,6 +287,29 @@ class LibvirtConnection(object):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
+ def _map_to_instance_info(self, domain):
+ """Gets info from a virsh domain object into an InstanceInfo"""
+
+ # domain.info() returns a list of:
+ # state: one of the state values (virDomainState)
+ # maxMemory: the maximum memory used by the domain
+ # memory: the current amount of memory used by the domain
+ # nbVirtCPU: the number of virtual CPU
+ # cpuTime: the time used by the domain in nanoseconds
+
+ (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
+ name = domain.name()
+
+ return driver.InstanceInfo(name, state)
+
+ def list_instances_detail(self):
+ infos = []
+ for domain_id in self._conn.listDomainsID():
+ domain = self._conn.lookupByID(domain_id)
+ info = self._map_to_instance_info(domain)
+ infos.append(info)
+ return infos
+
def destroy(self, instance, cleanup=True):
try:
virt_dom = self._conn.lookupByName(instance['name'])
@@ -257,6 +347,8 @@ class LibvirtConnection(object):
instance_name = instance['name']
LOG.info(_('instance %(instance_name)s: deleting instance files'
' %(target)s') % locals())
+ if FLAGS.libvirt_type == 'lxc':
+ disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images)
if os.path.exists(target):
shutil.rmtree(target)
@@ -315,16 +407,77 @@ class LibvirtConnection(object):
@exception.wrap_exception
def snapshot(self, instance, image_id):
- """ Create snapshot from a running VM instance """
- raise NotImplementedError(
- _("Instance snapshotting is not supported for libvirt"
- "at this time"))
+ """Create snapshot from a running VM instance.
+
+ This command only works with qemu 0.14+, the qemu_img flag is
+ provided so that a locally compiled binary of qemu-img can be used
+ to support this command.
+
+ """
+ image_service = utils.import_object(FLAGS.image_service)
+ virt_dom = self._conn.lookupByName(instance['name'])
+ elevated = context.get_admin_context()
+
+ base = image_service.show(elevated, instance['image_id'])
+
+ metadata = {'disk_format': base['disk_format'],
+ 'container_format': base['container_format'],
+ 'is_public': False,
+ 'properties': {'architecture': base['architecture'],
+ 'type': base['type'],
+ 'name': '%s.%s' % (base['name'], image_id),
+ 'kernel_id': instance['kernel_id'],
+ 'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance['project_id'],
+ 'ramdisk_id': instance['ramdisk_id'],
+ }
+ }
+
+ # Make the snapshot
+ snapshot_name = uuid.uuid4().hex
+ snapshot_xml = """
+ <domainsnapshot>
+ <name>%s</name>
+ </domainsnapshot>
+ """ % snapshot_name
+ snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0)
+
+ # Find the disk
+ xml_desc = virt_dom.XMLDesc(0)
+ domain = ElementTree.fromstring(xml_desc)
+ source = domain.find('devices/disk/source')
+ disk_path = source.get('file')
+
+ # Export the snapshot to a raw image
+ temp_dir = tempfile.mkdtemp()
+ out_path = os.path.join(temp_dir, snapshot_name)
+ qemu_img_cmd = (FLAGS.qemu_img,
+ 'convert',
+ '-f', 'qcow2',
+ '-O', 'raw',
+ '-s', snapshot_name,
+ disk_path,
+ out_path)
+ utils.execute(*qemu_img_cmd)
+
+ # Upload that image to the image service
+ with open(out_path) as image_file:
+ image_service.update(elevated,
+ image_id,
+ metadata,
+ image_file)
+
+ # Clean up
+ shutil.rmtree(temp_dir)
@exception.wrap_exception
def reboot(self, instance):
self.destroy(instance, False)
xml = self.to_xml(instance)
+ self.firewall_driver.setup_basic_filtering(instance)
+ self.firewall_driver.prepare_instance_filter(instance)
self._conn.createXML(xml, 0)
+ self.firewall_driver.apply_instance_filter(instance)
+
timer = utils.LoopingCall(f=None)
def _wait_for_reboot():
@@ -347,22 +500,22 @@ class LibvirtConnection(object):
@exception.wrap_exception
def pause(self, instance, callback):
- raise exception.APIError("pause not supported for libvirt.")
+ raise exception.ApiError("pause not supported for libvirt.")
@exception.wrap_exception
def unpause(self, instance, callback):
- raise exception.APIError("unpause not supported for libvirt.")
+ raise exception.ApiError("unpause not supported for libvirt.")
@exception.wrap_exception
def suspend(self, instance, callback):
- raise exception.APIError("suspend not supported for libvirt")
+ raise exception.ApiError("suspend not supported for libvirt")
@exception.wrap_exception
def resume(self, instance, callback):
- raise exception.APIError("resume not supported for libvirt")
+ raise exception.ApiError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance):
+ def rescue(self, instance, callback=None):
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -392,21 +545,27 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance):
+ def unrescue(self, instance, callback=None):
# NOTE(vish): Because reboot destroys and recreates an instance using
# the normal xml file, we can just call reboot here
self.reboot(instance)
@exception.wrap_exception
- def spawn(self, instance):
- xml = self.to_xml(instance)
+ def poll_rescued_instances(self, timeout):
+ pass
+
+ # NOTE(ilyaalekseyev): Implementation like in multinics
+ # for xenapi(tr3buchet)
+ @exception.wrap_exception
+ def spawn(self, instance, network_info=None):
+ xml = self.to_xml(instance, network_info)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.NOSTATE,
'launching')
- self.firewall_driver.setup_basic_filtering(instance)
- self.firewall_driver.prepare_instance_filter(instance)
- self._create_image(instance, xml)
+ self.firewall_driver.setup_basic_filtering(instance, network_info)
+ self.firewall_driver.prepare_instance_filter(instance, network_info)
+ self._create_image(instance, xml, network_info)
self._conn.createXML(xml, 0)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -438,8 +597,10 @@ class LibvirtConnection(object):
if virsh_output.startswith('/dev/'):
LOG.info(_("cool, it's a device"))
- out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
- virsh_output, check_exit_code=False)
+ out, err = utils.execute('sudo', 'dd',
+ "if=%s" % virsh_output,
+ 'iflag=nonblock',
+ check_exit_code=False)
return out
else:
return ''
@@ -461,14 +622,17 @@ class LibvirtConnection(object):
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
- utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+ utils.execute('sudo', 'chown', os.getuid(), console_log)
if FLAGS.libvirt_type == 'xen':
# Xen is special
- virsh_output = utils.execute("virsh ttyconsole %s" %
+ virsh_output = utils.execute('virsh', 'ttyconsole',
instance['name'])
data = self._flush_xen_console(virsh_output)
fpath = self._append_to_file(data, console_log)
+ elif FLAGS.libvirt_type == 'lxc':
+ # LXC is also special
+ LOG.info(_("Unable to read LXC console"))
else:
fpath = console_log
@@ -482,9 +646,10 @@ class LibvirtConnection(object):
port = random.randint(int(start_port), int(end_port))
# netcat will exit with 0 only if the port is in use,
# so a nonzero return value implies it is unused
- cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
- stdout, stderr = utils.execute(cmd)
- if stdout.strip() == 'free':
+ cmd = 'netcat', '0.0.0.0', port, '-w', '1'
+ try:
+ stdout, stderr = utils.execute(*cmd, process_input='')
+ except exception.ProcessExecutionError:
return port
raise Exception(_('Unable to find an open port'))
@@ -511,7 +676,26 @@ class LibvirtConnection(object):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
- def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs):
+ @exception.wrap_exception
+ def get_vnc_console(self, instance):
+ def get_vnc_port_for_instance(instance_name):
+ virt_dom = self._conn.lookupByName(instance_name)
+ xml = virt_dom.XMLDesc(0)
+ # TODO: use etree instead of minidom
+ dom = minidom.parseString(xml)
+
+ for graphic in dom.getElementsByTagName('graphics'):
+ if graphic.getAttribute('type') == 'vnc':
+ return graphic.getAttribute('port')
+
+ port = get_vnc_port_for_instance(instance['name'])
+ token = str(uuid.uuid4())
+ host = instance['host']
+
+ return {'token': token, 'host': host, 'port': port}
+
+ @staticmethod
+ def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
"""Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a
@@ -530,14 +714,20 @@ class LibvirtConnection(object):
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base = os.path.join(base_dir, fname)
- if not os.path.exists(base):
- fn(target=base, *args, **kwargs)
+
+ @utils.synchronized(fname)
+ def call_if_not_exists(base, fn, *args, **kwargs):
+ if not os.path.exists(base):
+ fn(target=base, *args, **kwargs)
+
+ call_if_not_exists(base, fn, *args, **kwargs)
+
if cow:
- utils.execute('qemu-img create -f qcow2 -o '
- 'cluster_size=2M,backing_file=%s %s'
- % (base, target))
+ utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
+ 'cluster_size=2M,backing_file=%s' % base,
+ target)
else:
- utils.execute('cp %s %s' % (base, target))
+ utils.execute('cp', base, target)
def _fetch_image(self, target, image_id, user, project, size=None):
"""Grab image and optionally attempt to resize it"""
@@ -547,10 +737,17 @@ class LibvirtConnection(object):
def _create_local(self, target, local_gb):
"""Create a blank image of specified size"""
- utils.execute('truncate %s -s %dG' % (target, local_gb))
+ utils.execute('truncate', target, '-s', "%dG" % local_gb)
# TODO(vish): should we format disk by default?
- def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
+ def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
+ network_info=None):
+ if not network_info:
+ network_info = _get_network_info(inst)
+
+ if not suffix:
+ suffix = ''
+
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(FLAGS.instances_path,
@@ -558,13 +755,17 @@ class LibvirtConnection(object):
fname + suffix)
# ensure directories exist and are writable
- utils.execute('mkdir -p %s' % basepath(suffix=''))
+ utils.execute('mkdir', '-p', basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
+ if FLAGS.libvirt_type == 'lxc':
+ container_dir = '%s/rootfs' % basepath(suffix='')
+ utils.execute('mkdir', '-p', container_dir)
+
# NOTE(vish): No need add the suffix to console.log
os.close(os.open(basepath('console.log', ''),
os.O_CREAT | os.O_WRONLY, 0660))
@@ -578,21 +779,23 @@ class LibvirtConnection(object):
'ramdisk_id': inst['ramdisk_id']}
if disk_images['kernel_id']:
+ fname = '%08x' % int(disk_images['kernel_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('kernel'),
- fname=disk_images['kernel_id'],
+ fname=fname,
image_id=disk_images['kernel_id'],
user=user,
project=project)
if disk_images['ramdisk_id']:
+ fname = '%08x' % int(disk_images['ramdisk_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('ramdisk'),
- fname=disk_images['ramdisk_id'],
+ fname=fname,
image_id=disk_images['ramdisk_id'],
user=user,
project=project)
- root_fname = disk_images['image_id']
+ root_fname = '%08x' % int(disk_images['image_id'])
size = FLAGS.minimum_root_size
if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
size = None
@@ -606,7 +809,7 @@ class LibvirtConnection(object):
user=user,
project=project,
size=size)
- type_data = instance_types.INSTANCE_TYPES[inst['instance_type']]
+ type_data = instance_types.get_instance_type(inst['instance_type'])
if type_data['local_gb']:
self._cache_image(fn=self._create_local,
@@ -622,23 +825,47 @@ class LibvirtConnection(object):
if not inst['kernel_id']:
target_partition = "1"
- key = str(inst['key_data'])
+ if FLAGS.libvirt_type == 'lxc':
+ target_partition = None
+
+ if inst['key_data']:
+ key = str(inst['key_data'])
+ else:
+ key = None
net = None
- network_ref = db.network_get_by_instance(context.get_admin_context(),
- inst['id'])
- if network_ref['injected']:
- admin_context = context.get_admin_context()
- address = db.instance_get_fixed_address(admin_context, inst['id'])
- ra_server = network_ref['ra_server']
- if not ra_server:
- ra_server = "fd00::"
- with open(FLAGS.injected_network_template) as f:
- net = f.read() % {'address': address,
- 'netmask': network_ref['netmask'],
- 'gateway': network_ref['gateway'],
- 'broadcast': network_ref['broadcast'],
- 'dns': network_ref['dns'],
- 'ra_server': ra_server}
+
+ nets = []
+ ifc_template = open(FLAGS.injected_network_template).read()
+ ifc_num = -1
+ have_injected_networks = False
+ admin_context = context.get_admin_context()
+ for (network_ref, mapping) in network_info:
+ ifc_num += 1
+
+ if not network_ref['injected']:
+ continue
+
+ have_injected_networks = True
+ address = mapping['ips'][0]['ip']
+ address_v6 = None
+ if FLAGS.use_ipv6:
+ address_v6 = mapping['ip6s'][0]['ip']
+ net_info = {'name': 'eth%d' % ifc_num,
+ 'address': address,
+ 'netmask': network_ref['netmask'],
+ 'gateway': network_ref['gateway'],
+ 'broadcast': network_ref['broadcast'],
+ 'dns': network_ref['dns'],
+ 'address_v6': address_v6,
+ 'gateway_v6': network_ref['gateway_v6'],
+ 'netmask_v6': network_ref['netmask_v6']}
+ nets.append(net_info)
+
+ if have_injected_networks:
+ net = str(Template(ifc_template,
+ searchList=[{'interfaces': nets,
+ 'use_ipv6': FLAGS.use_ipv6}]))
+
if key or net:
inst_name = inst['name']
img_id = inst.image_id
@@ -652,27 +879,24 @@ class LibvirtConnection(object):
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
nbd=FLAGS.use_cow_images)
+
+ if FLAGS.libvirt_type == 'lxc':
+ disk.setup_container(basepath('disk'),
+ container_dir=container_dir,
+ nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
- utils.execute('sudo chown root %s' % basepath('disk'))
+ utils.execute('sudo', 'chown', 'root', basepath('disk'))
- def to_xml(self, instance, rescue=False):
- # TODO(termie): cache?
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- # FIXME(vish): stick this in db
- instance_type = instance['instance_type']
- instance_type = instance_types.INSTANCE_TYPES[instance_type]
- ip_address = db.instance_get_fixed_address(context.get_admin_context(),
- instance['id'])
+ def _get_nic_for_xml(self, network, mapping):
# Assume that the gateway also acts as the dhcp server.
dhcp_server = network['gateway']
- ra_server = network['ra_server']
+ gateway_v6 = network['gateway_v6']
+ mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
if FLAGS.use_ipv6:
@@ -697,6 +921,38 @@ class LibvirtConnection(object):
(net, mask)
else:
extra_params = "\n"
+
+ result = {
+ 'id': mac_id,
+ 'bridge_name': network['bridge'],
+ 'mac_address': mapping['mac'],
+ 'ip_address': mapping['ips'][0]['ip'],
+ 'dhcp_server': dhcp_server,
+ 'extra_params': extra_params,
+ }
+
+ if gateway_v6:
+ result['gateway_v6'] = gateway_v6 + "/128"
+
+ return result
+
+ def to_xml(self, instance, rescue=False, network_info=None):
+ # TODO(termie): cache?
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+
+ # TODO(adiantum) remove network_info creation code
+ # when multinics will be completed
+ if not network_info:
+ network_info = _get_network_info(instance)
+
+ nics = []
+ for (network, mapping) in network_info:
+ nics.append(self._get_nic_for_xml(network,
+ mapping))
+ # FIXME(vish): stick this in db
+ instance_type_name = instance['instance_type']
+ instance_type = instance_types.get_instance_type(instance_type_name)
+
if FLAGS.use_cow_images:
driver_type = 'qcow2'
else:
@@ -708,17 +964,14 @@ class LibvirtConnection(object):
instance['name']),
'memory_kb': instance_type['memory_mb'] * 1024,
'vcpus': instance_type['vcpus'],
- 'bridge_name': network['bridge'],
- 'mac_address': instance['mac_address'],
- 'ip_address': ip_address,
- 'dhcp_server': dhcp_server,
- 'extra_params': extra_params,
'rescue': rescue,
'local': instance_type['local_gb'],
- 'driver_type': driver_type}
+ 'driver_type': driver_type,
+ 'nics': nics}
- if ra_server:
- xml_info['ra_server'] = ra_server + "/128"
+ if FLAGS.vnc_enabled:
+ if FLAGS.libvirt_type != 'lxc':
+ xml_info['vncserver_host'] = FLAGS.vncserver_host
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
@@ -731,7 +984,6 @@ class LibvirtConnection(object):
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'),
instance['name'])
-
return xml
def get_info(self, instance_name):
@@ -748,7 +1000,7 @@ class LibvirtConnection(object):
'cpu_time': cpu_time}
def get_diagnostics(self, instance_name):
- raise exception.APIError(_("diagnostics are not supported "
+ raise exception.ApiError(_("diagnostics are not supported "
"for libvirt"))
def get_disks(self, instance_name):
@@ -835,6 +1087,181 @@ class LibvirtConnection(object):
return interfaces
+ def get_vcpu_total(self):
+ """Get vcpu number of physical computer.
+
+ :returns: the number of cpu core.
+
+ """
+
+ # On certain platforms, this will raise a NotImplementedError.
+ try:
+ return multiprocessing.cpu_count()
+ except NotImplementedError:
+ LOG.warn(_("Cannot get the number of cpu, because this "
+ "function is not implemented for this platform. "
+ "This error can be safely ignored for now."))
+ return 0
+
+ def get_memory_mb_total(self):
+ """Get the total memory size(MB) of physical computer.
+
+ :returns: the total amount of memory(MB).
+
+ """
+
+ if sys.platform.upper() != 'LINUX2':
+ return 0
+
+ meminfo = open('/proc/meminfo').read().split()
+ idx = meminfo.index('MemTotal:')
+ # transforming kb to mb.
+ return int(meminfo[idx + 1]) / 1024
+
+ def get_local_gb_total(self):
+ """Get the total hdd size(GB) of physical computer.
+
+ :returns:
+ The total amount of HDD(GB).
+ Note that this value shows a partition where
+ NOVA-INST-DIR/instances mounts.
+
+ """
+
+ hddinfo = os.statvfs(FLAGS.instances_path)
+ return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024
+
+ def get_vcpu_used(self):
+ """ Get vcpu usage number of physical computer.
+
+ :returns: The total number of vcpu that currently used.
+
+ """
+
+ total = 0
+ for dom_id in self._conn.listDomainsID():
+ dom = self._conn.lookupByID(dom_id)
+ total += len(dom.vcpus()[1])
+ return total
+
+ def get_memory_mb_used(self):
+ """Get the free memory size(MB) of physical computer.
+
+ :returns: the total usage of memory(MB).
+
+ """
+
+ if sys.platform.upper() != 'LINUX2':
+ return 0
+
+ m = open('/proc/meminfo').read().split()
+ idx1 = m.index('MemFree:')
+ idx2 = m.index('Buffers:')
+ idx3 = m.index('Cached:')
+ avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024
+ return self.get_memory_mb_total() - avail
+
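
The usage figure above is computed indirectly: available memory is taken to be MemFree + Buffers + Cached from /proc/meminfo, and usage is the total minus that. A minimal standalone sketch of the same arithmetic, assuming a standard Linux /proc/meminfo layout:

def estimate_memory_mb_used():
    # Illustrative sketch only; mirrors the calculation above.
    fields = open('/proc/meminfo').read().split()
    total_kb = int(fields[fields.index('MemTotal:') + 1])
    free_kb = int(fields[fields.index('MemFree:') + 1])
    buffers_kb = int(fields[fields.index('Buffers:') + 1])
    cached_kb = int(fields[fields.index('Cached:') + 1])
    available_mb = (free_kb + buffers_kb + cached_kb) / 1024
    return total_kb / 1024 - available_mb
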
+ def get_local_gb_used(self):
+ """Get the free hdd size(GB) of physical computer.
+
+ :returns:
+ The total usage of HDD(GB).
+ Note that this value shows a partition where
+ NOVA-INST-DIR/instances mounts.
+
+ """
+
+ hddinfo = os.statvfs(FLAGS.instances_path)
+ avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024
+ return self.get_local_gb_total() - avail
+
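
Both disk figures come from os.statvfs() on FLAGS.instances_path: the total is f_frsize * f_blocks and the used amount is the total minus f_frsize * f_bavail. A hedged standalone sketch of the same calculation (the path below is just an example, not a flag read from Nova):

import os

def disk_usage_gb(path='/var/lib/nova/instances'):
    # Illustrative sketch only; the path is an assumption for the example.
    st = os.statvfs(path)
    gb = 1024 * 1024 * 1024
    total = st.f_frsize * st.f_blocks / gb
    avail = st.f_frsize * st.f_bavail / gb
    return total, total - avail
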
+ def get_hypervisor_type(self):
+ """Get hypervisor type.
+
+ :returns: hypervisor type (ex. qemu)
+
+ """
+
+ return self._conn.getType()
+
+ def get_hypervisor_version(self):
+ """Get hypervisor version.
+
+ :returns: hypervisor version (ex. 12003)
+
+ """
+
+ # NOTE(justinsb): getVersion moved between libvirt versions
+ # Trying to be compatible with older versions is a lost cause
+ # But ... we can at least give the user a nice message
+ method = getattr(self._conn, 'getVersion', None)
+ if method is None:
+ raise exception.Error(_("libvirt version is too old"
+ " (does not support getVersion)"))
+ # NOTE(justinsb): If we wanted to get the version, we could:
+ # method = getattr(libvirt, 'getVersion', None)
+ # NOTE(justinsb): This would then rely on a proper version check
+
+ return method()
+
+ def get_cpu_info(self):
+ """Get cpuinfo information.
+
+ Obtains cpu feature from virConnect.getCapabilities,
+ and returns as a json string.
+
+ :return: see above description
+
+ """
+
+ xml = self._conn.getCapabilities()
+ xml = libxml2.parseDoc(xml)
+ nodes = xml.xpathEval('//host/cpu')
+ if len(nodes) != 1:
+ raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
+ "but %d\n") % len(nodes)
+ + xml.serialize())
+
+ cpu_info = dict()
+
+ arch_nodes = xml.xpathEval('//host/cpu/arch')
+ if arch_nodes:
+ cpu_info['arch'] = arch_nodes[0].getContent()
+
+ model_nodes = xml.xpathEval('//host/cpu/model')
+ if model_nodes:
+ cpu_info['model'] = model_nodes[0].getContent()
+
+ vendor_nodes = xml.xpathEval('//host/cpu/vendor')
+ if vendor_nodes:
+ cpu_info['vendor'] = vendor_nodes[0].getContent()
+
+ topology_nodes = xml.xpathEval('//host/cpu/topology')
+ topology = dict()
+ if topology_nodes:
+ topology_node = topology_nodes[0].get_properties()
+ while topology_node:
+ name = topology_node.get_name()
+ topology[name] = topology_node.getContent()
+ topology_node = topology_node.get_next()
+
+ keys = ['cores', 'sockets', 'threads']
+ tkeys = topology.keys()
+ if set(tkeys) != set(keys):
+ ks = ', '.join(keys)
+ raise exception.Invalid(_("Invalid xml: topology"
+ "(%(topology)s) must have "
+ "%(ks)s") % locals())
+
+ feature_nodes = xml.xpathEval('//host/cpu/feature')
+ features = list()
+ for nodes in feature_nodes:
+ features.append(nodes.get_properties().getContent())
+
+ cpu_info['topology'] = topology
+ cpu_info['features'] = features
+ return utils.dumps(cpu_info)
+
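
get_cpu_info() flattens the <cpu> element of the capabilities XML into a JSON string. As a rough illustration only (the values below are invented, not taken from any real host), the decoded result looks like:

# Hypothetical example of utils.loads(driver.get_cpu_info()):
{'arch': 'x86_64',
 'model': 'Nehalem',
 'vendor': 'Intel',
 'topology': {'cores': '4', 'sockets': '1', 'threads': '2'},
 'features': ['sse4.1', 'sse4.2', 'vmx']}
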
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name, not an Instance, so
@@ -865,9 +1292,214 @@ class LibvirtConnection(object):
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
+ def update_available_resource(self, ctxt, host):
+ """Updates compute manager resource info on ComputeNode table.
+
+ This method is called when nova-compute launches, and
+ whenever the admin executes "nova-manage service update_resource".
+
+ :param ctxt: security context
+ :param host: hostname that compute manager is currently running
+
+ """
+
+ try:
+ service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
+ except exception.NotFound:
+ raise exception.Invalid(_("Cannot update compute manager "
+ "specific info, because no service "
+ "record was found."))
+
+ # Updating host information
+ dic = {'vcpus': self.get_vcpu_total(),
+ 'memory_mb': self.get_memory_mb_total(),
+ 'local_gb': self.get_local_gb_total(),
+ 'vcpus_used': self.get_vcpu_used(),
+ 'memory_mb_used': self.get_memory_mb_used(),
+ 'local_gb_used': self.get_local_gb_used(),
+ 'hypervisor_type': self.get_hypervisor_type(),
+ 'hypervisor_version': self.get_hypervisor_version(),
+ 'cpu_info': self.get_cpu_info()}
+
+ compute_node_ref = service_ref['compute_node']
+ if not compute_node_ref:
+ LOG.info(_('Compute_service record created for %s ') % host)
+ dic['service_id'] = service_ref['id']
+ db.compute_node_create(ctxt, dic)
+ else:
+ LOG.info(_('Compute_service record updated for %s ') % host)
+ db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
+
+ def compare_cpu(self, cpu_info):
+ """Checks the host cpu is compatible to a cpu given by xml.
+
+ "xml" must be a part of libvirt.openReadonly().getCapabilities().
+ return values follows by virCPUCompareResult.
+ if 0 > return value, do live migration.
+ 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
+
+ :param cpu_info: json string that shows cpu feature(see get_cpu_info())
+ :returns:
+ None. if given cpu info is not compatible to this server,
+ raise exception.
+
+ """
+
+ LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
+ dic = utils.loads(cpu_info)
+ xml = str(Template(self.cpuinfo_xml, searchList=dic))
+ LOG.info(_('to xml...\n:%s ') % xml)
+
+ u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
+ m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
+ # unknown character exists in xml, then libvirt complains
+ try:
+ ret = self._conn.compareCPU(xml, 0)
+ except libvirt.libvirtError, e:
+ ret = e.message
+ LOG.error(m % locals())
+ raise
+
+ if ret <= 0:
+ raise exception.Invalid(m % locals())
+
+ return
+
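
In the live-migration path the source host's get_cpu_info() output is handed to compare_cpu() on the destination, and a non-positive virCPUCompareResult aborts the migration. A hedged sketch of that hand-off (the two driver objects are placeholders, not code from this patch):

# Hypothetical wiring between two hosts' drivers:
src_cpu_info = source_connection.get_cpu_info()   # JSON string from the source
try:
    dest_connection.compare_cpu(src_cpu_info)     # raises if incompatible
except exception.Invalid:
    # pick another destination or refuse the migration
    raise
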
+ def ensure_filtering_rules_for_instance(self, instance_ref,
+ time=None):
+ """Setting up filtering rules and waiting for its completion.
+
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+ ( Waiting only for filterling rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+ so , no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed.
+
+ :params instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+
+ if not time:
+ time = greenthread
+
+ # If any instances never launch at destination host,
+ # basic-filtering must be set here.
+ self.firewall_driver.setup_basic_filtering(instance_ref)
+ # setting up nova-instance-instance-xx mainly.
+ self.firewall_driver.prepare_instance_filter(instance_ref)
+
+ # wait for completion
+ timeout_count = range(FLAGS.live_migration_retry_count)
+ while timeout_count:
+ try:
+ filter_name = 'nova-instance-%s' % instance_ref.name
+ self._conn.nwfilterLookupByName(filter_name)
+ break
+ except libvirt.libvirtError:
+ timeout_count.pop()
+ if len(timeout_count) == 0:
+ ec2_id = instance_ref['hostname']
+ iname = instance_ref.name
+ msg = _('Timeout migrating for %(ec2_id)s(%(iname)s)')
+ raise exception.Error(msg % locals())
+ time.sleep(1)
+
+ def live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Spawning live_migration operation for distributing high-load.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+
+ greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
+ post_method, recover_method)
+
+ def _live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Do live migration.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+
+ # Do live migration.
+ try:
+ flaglist = FLAGS.live_migration_flag.split(',')
+ flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
+ logical_sum = reduce(lambda x, y: x | y, flagvals)
+
+ if self.read_only:
+ tmpconn = self._connect(self.libvirt_uri, False)
+ dom = tmpconn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
+ tmpconn.close()
+ else:
+ dom = self._conn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
+
+ except Exception:
+ recover_method(ctxt, instance_ref)
+ raise
+
+ # Waiting for completion of live_migration.
+ timer = utils.LoopingCall(f=None)
+
+ def wait_for_live_migration():
+ """waiting for live migration completion"""
+ try:
+ self.get_info(instance_ref.name)['state']
+ except exception.NotFound:
+ timer.stop()
+ post_method(ctxt, instance_ref, dest)
+
+ timer.f = wait_for_live_migration
+ timer.start(interval=0.5, now=True)
+
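
FLAGS.live_migration_flag is a comma-separated list of libvirt constant names that gets OR-ed into a single bitmask before migrateToURI(). A minimal sketch of that conversion, assuming the standard libvirt constants VIR_MIGRATE_UNDEFINE_SOURCE and VIR_MIGRATE_LIVE (the flag string itself is an example, not the project default):

import libvirt

flag_string = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_LIVE'  # example flag value
flag_values = [getattr(libvirt, name.strip()) for name in flag_string.split(',')]
logical_sum = reduce(lambda x, y: x | y, flag_values)
# logical_sum is the integer bitmask handed to dom.migrateToURI()
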
+ def unfilter_instance(self, instance_ref):
+ """See comments of same method in firewall_driver."""
+ self.firewall_driver.unfilter_instance(instance_ref)
+
class FirewallDriver(object):
- def prepare_instance_filter(self, instance):
+ def prepare_instance_filter(self, instance, network_info=None):
"""Prepare filters for the instance.
At this point, the instance isn't running yet."""
@@ -901,7 +1533,7 @@ class FirewallDriver(object):
the security group."""
raise NotImplementedError()
- def setup_basic_filtering(self, instance):
+ def setup_basic_filtering(self, instance, network_info=None):
"""Create rules to block spoofing and allow dhcp.
This gets called when spawning an instance, before
@@ -910,11 +1542,6 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
- def _ra_server_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['ra_server']
-
class NWFilterFirewall(FirewallDriver):
"""
@@ -1006,10 +1633,13 @@ class NWFilterFirewall(FirewallDriver):
</rule>
</filter>'''
- def setup_basic_filtering(self, instance):
+ def setup_basic_filtering(self, instance, network_info=None):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
logging.info('called setup_basic_filtering in nwfilter')
+ if not network_info:
+ network_info = _get_network_info(instance)
+
if self.handle_security_groups:
# No point in setting up a filter set that we'll be overriding
# anyway.
@@ -1018,9 +1648,11 @@ class NWFilterFirewall(FirewallDriver):
logging.info('ensuring static filters')
self._ensure_static_filters()
- instance_filter_name = self._instance_filter_name(instance)
- self._define_filter(self._filter_container(instance_filter_name,
- ['nova-base']))
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ self._define_filter(self._filter_container(instance_filter_name,
+ ['nova-base']))
def _ensure_static_filters(self):
if self.static_filters_configured:
@@ -1111,48 +1743,60 @@ class NWFilterFirewall(FirewallDriver):
# Nothing to do
pass
- def prepare_instance_filter(self, instance):
+ def prepare_instance_filter(self, instance, network_info=None):
"""
Creates an NWFilter for the given instance. In the process,
it makes sure the filters for the security groups as well as
the base filter are all in place.
"""
+ if not network_info:
+ network_info = _get_network_info(instance)
if instance['image_id'] == FLAGS.vpn_image_id:
base_filter = 'nova-vpn'
else:
base_filter = 'nova-base'
- instance_filter_name = self._instance_filter_name(instance)
- instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,)
- instance_filter_children = [base_filter, instance_secgroup_filter_name]
+ ctxt = context.get_admin_context()
+
+ instance_secgroup_filter_name = \
+ '%s-secgroup' % (self._instance_filter_name(instance))
+
instance_secgroup_filter_children = ['nova-base-ipv4',
'nova-base-ipv6',
'nova-allow-dhcp-server']
- if FLAGS.use_ipv6:
- ra_server = self._ra_server_for_instance(instance)
- if ra_server:
- instance_secgroup_filter_children += ['nova-allow-ra-server']
-
- ctxt = context.get_admin_context()
-
- if FLAGS.allow_project_net_traffic:
- instance_filter_children += ['nova-project']
- if FLAGS.use_ipv6:
- instance_filter_children += ['nova-project-v6']
- for security_group in db.security_group_get_by_instance(ctxt,
- instance['id']):
+ for security_group in \
+ db.security_group_get_by_instance(ctxt, instance['id']):
self.refresh_security_group_rules(security_group['id'])
instance_secgroup_filter_children += [('nova-secgroup-%s' %
- security_group['id'])]
+ security_group['id'])]
- self._define_filter(
+ self._define_filter(
self._filter_container(instance_secgroup_filter_name,
instance_secgroup_filter_children))
- self._define_filter(
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ instance_filter_children = \
+ [base_filter, instance_secgroup_filter_name]
+
+ if FLAGS.use_ipv6:
+ gateway_v6 = network['gateway_v6']
+
+ if gateway_v6:
+ instance_secgroup_filter_children += \
+ ['nova-allow-ra-server']
+
+ if FLAGS.allow_project_net_traffic:
+ instance_filter_children += ['nova-project']
+ if FLAGS.use_ipv6:
+ instance_filter_children += ['nova-project-v6']
+
+ self._define_filter(
self._filter_container(instance_filter_name,
instance_filter_children))
@@ -1200,144 +1844,145 @@ class NWFilterFirewall(FirewallDriver):
xml += "chain='ipv4'>%s</filter>" % rule_xml
return xml
- def _instance_filter_name(self, instance):
- return 'nova-instance-%s' % instance['name']
+ def _instance_filter_name(self, instance, nic_id=None):
+ if not nic_id:
+ return 'nova-instance-%s' % (instance['name'])
+ return 'nova-instance-%s-%s' % (instance['name'], nic_id)
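
With multiple NICs, every interface now gets its own nwfilter whose name embeds the colon-stripped MAC; calling the helper without nic_id keeps the old single-filter name. A quick illustration (the instance name and MAC are invented):

mac = '02:16:3e:12:34:56'
nic_id = mac.replace(':', '')   # '02163e123456'
# per-NIC filter:  nova-instance-instance-00000001-02163e123456
# legacy filter:   nova-instance-instance-00000001
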
class IptablesFirewallDriver(FirewallDriver):
def __init__(self, execute=None, **kwargs):
- self.execute = execute or utils.execute
+ from nova.network import linux_net
+ self.iptables = linux_net.iptables_manager
self.instances = {}
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
- def setup_basic_filtering(self, instance):
+ self.iptables.ipv4['filter'].add_chain('sg-fallback')
+ self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
+ self.iptables.ipv6['filter'].add_chain('sg-fallback')
+ self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
+
+ def setup_basic_filtering(self, instance, network_info=None):
"""Use NWFilter from libvirt for this."""
- return self.nwfilter.setup_basic_filtering(instance)
+ if not network_info:
+ network_info = _get_network_info(instance)
+ return self.nwfilter.setup_basic_filtering(instance, network_info)
def apply_instance_filter(self, instance):
"""No-op. Everything is done in prepare_instance_filter"""
pass
- def remove_instance(self, instance):
- if instance['id'] in self.instances:
- del self.instances[instance['id']]
+ def unfilter_instance(self, instance):
+ if self.instances.pop(instance['id'], None):
+ self.remove_filters_for_instance(instance)
+ self.iptables.apply()
else:
LOG.info(_('Attempted to unfilter instance %s which is not '
- 'filtered'), instance['id'])
+ 'filtered'), instance['id'])
- def add_instance(self, instance):
+ def prepare_instance_filter(self, instance, network_info=None):
+ if not network_info:
+ network_info = _get_network_info(instance)
self.instances[instance['id']] = instance
+ self.add_filters_for_instance(instance, network_info)
+ self.iptables.apply()
- def unfilter_instance(self, instance):
- self.remove_instance(instance)
- self.apply_ruleset()
-
- def prepare_instance_filter(self, instance):
- self.add_instance(instance)
- self.apply_ruleset()
-
- def apply_ruleset(self):
- current_filter, _ = self.execute('sudo iptables-save -t filter')
- current_lines = current_filter.split('\n')
- new_filter = self.modify_rules(current_lines, 4)
- self.execute('sudo iptables-restore',
- process_input='\n'.join(new_filter))
- if(FLAGS.use_ipv6):
- current_filter, _ = self.execute('sudo ip6tables-save -t filter')
- current_lines = current_filter.split('\n')
- new_filter = self.modify_rules(current_lines, 6)
- self.execute('sudo ip6tables-restore',
- process_input='\n'.join(new_filter))
+ def add_filters_for_instance(self, instance, network_info=None):
+ if not network_info:
+ network_info = _get_network_info(instance)
+ chain_name = self._instance_chain_name(instance)
+
+ self.iptables.ipv4['filter'].add_chain(chain_name)
- def modify_rules(self, current_lines, ip_version=4):
+ ips_v4 = [ip['ip'] for (_, mapping) in network_info
+ for ip in mapping['ips']]
+
+ for ipv4_address in ips_v4:
+ self.iptables.ipv4['filter'].add_rule('local',
+ '-d %s -j $%s' %
+ (ipv4_address, chain_name))
+
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ ips_v6 = [ip['ip'] for (_, mapping) in network_info
+ for ip in mapping['ip6s']]
+
+ for ipv6_address in ips_v6:
+ self.iptables.ipv6['filter'].add_rule('local',
+ '-d %s -j $%s' %
+ (ipv6_address,
+ chain_name))
+
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule(chain_name, rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+
+ def remove_filters_for_instance(self, instance):
+ chain_name = self._instance_chain_name(instance)
+
+ self.iptables.ipv4['filter'].remove_chain(chain_name)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].remove_chain(chain_name)
+
+ def instance_rules(self, instance, network_info=None):
+ if not network_info:
+ network_info = _get_network_info(instance)
ctxt = context.get_admin_context()
- # Remove any trace of nova rules.
- new_filter = filter(lambda l: 'nova-' not in l, current_lines)
-
- seen_chains = False
- for rules_index in range(len(new_filter)):
- if not seen_chains:
- if new_filter[rules_index].startswith(':'):
- seen_chains = True
- elif seen_chains == 1:
- if not new_filter[rules_index].startswith(':'):
- break
- our_chains = [':nova-fallback - [0:0]']
- our_rules = ['-A nova-fallback -j DROP']
-
- our_chains += [':nova-local - [0:0]']
- our_rules += ['-A FORWARD -j nova-local']
- our_rules += ['-A OUTPUT -j nova-local']
-
- security_groups = {}
- # Add our chains
- # First, we add instance chains and rules
- for instance_id in self.instances:
- instance = self.instances[instance_id]
- chain_name = self._instance_chain_name(instance)
- if(ip_version == 4):
- ip_address = self._ip_for_instance(instance)
- elif(ip_version == 6):
- ip_address = self._ip_for_instance_v6(instance)
-
- our_chains += [':%s - [0:0]' % chain_name]
-
- # Jump to the per-instance chain
- our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
- chain_name)]
-
- # Always drop invalid packets
- our_rules += ['-A %s -m state --state '
- 'INVALID -j DROP' % (chain_name,)]
-
- # Allow established connections
- our_rules += ['-A %s -m state --state '
- 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
-
- # Jump to each security group chain in turn
- for security_group in \
- db.security_group_get_by_instance(ctxt,
- instance['id']):
- security_groups[security_group['id']] = security_group
-
- sg_chain_name = self._security_group_chain_name(
- security_group['id'])
+ ipv4_rules = []
+ ipv6_rules = []
- our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
-
- if(ip_version == 4):
- # Allow DHCP responses
- dhcp_server = self._dhcp_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
- '-j ACCEPT ' % (chain_name, dhcp_server)]
- #Allow project network traffic
- if (FLAGS.allow_project_net_traffic):
- cidr = self._project_cidr_for_instance(instance)
- our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
- elif(ip_version == 6):
- # Allow RA responses
- ra_server = self._ra_server_for_instance(instance)
- if ra_server:
- our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' %
- (chain_name, ra_server + "/128")]
- #Allow project network traffic
- if (FLAGS.allow_project_net_traffic):
- cidrv6 = self._project_cidrv6_for_instance(instance)
- our_rules += ['-A %s -s %s -j ACCEPT' %
- (chain_name, cidrv6)]
-
- # If nothing matches, jump to the fallback chain
- our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
+ # Always drop invalid packets
+ ipv4_rules += ['-m state --state INVALID -j DROP']
+ ipv6_rules += ['-m state --state INVALID -j DROP']
- # then, security group chains and rules
- for security_group_id in security_groups:
- chain_name = self._security_group_chain_name(security_group_id)
- our_chains += [':%s - [0:0]' % chain_name]
+ # Allow established connections
+ ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+ ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+
+ dhcp_servers = [network['gateway'] for (network, _m) in network_info]
- rules = \
- db.security_group_rule_get_by_security_group(ctxt,
- security_group_id)
+ for dhcp_server in dhcp_servers:
+ ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT' % (dhcp_server,))
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrs = [network['cidr'] for (network, _m) in network_info]
+ for cidr in cidrs:
+ ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
+
+ # We wrap these in FLAGS.use_ipv6 because they might cause
+ # a DB lookup. The other ones are just list operations, so
+ # they're not worth the clutter.
+ if FLAGS.use_ipv6:
+ # Allow RA responses
+ gateways_v6 = [network['gateway_v6'] for (network, _) in
+ network_info]
+ for gateway_v6 in gateways_v6:
+ ipv6_rules.append(
+ '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrv6s = [network['cidr_v6'] for (network, _m)
+ in network_info]
+
+ for cidrv6 in cidrv6s:
+ ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
+
+ security_groups = db.security_group_get_by_instance(ctxt,
+ instance['id'])
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ rules = db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
for rule in rules:
logging.info('%r', rule)
@@ -1348,14 +1993,16 @@ class IptablesFirewallDriver(FirewallDriver):
continue
version = _get_ip_version(rule.cidr)
- if version != ip_version:
- continue
+ if version == 4:
+ rules = ipv4_rules
+ else:
+ rules = ipv6_rules
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
- args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
+ args = ['-p', protocol, '-s', rule.cidr]
if rule.protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
@@ -1376,57 +2023,36 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
- if(ip_version == 4):
+ if version == 4:
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
- elif(ip_version == 6):
+ elif version == 6:
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j ACCEPT']
- our_rules += [' '.join(args)]
+ rules += [' '.join(args)]
+
+ ipv4_rules += ['-j $sg-fallback']
+ ipv6_rules += ['-j $sg-fallback']
- new_filter[rules_index:rules_index] = our_rules
- new_filter[rules_index:rules_index] = our_chains
- logging.info('new_filter: %s', '\n'.join(new_filter))
- return new_filter
+ return ipv4_rules, ipv6_rules
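
instance_rules() now returns bare rule bodies (no '-A <chain>' prefix); the IptablesManager adds the per-instance chain when the rules are applied. As a hedged illustration only, with invented addresses and a permissive security group, the IPv4 list might resemble:

# Illustrative output; the exact rules depend on the instance's networks
# and security groups.
ipv4_rules = [
    '-m state --state INVALID -j DROP',
    '-m state --state ESTABLISHED,RELATED -j ACCEPT',
    '-s 10.0.0.1 -p udp --sport 67 --dport 68 -j ACCEPT',  # DHCP from the gateway
    '-p tcp -s 0.0.0.0/0 -j ACCEPT',                       # a wide-open security group rule
    '-j $sg-fallback',                                     # drop anything unmatched
]
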
def refresh_security_group_members(self, security_group):
pass
def refresh_security_group_rules(self, security_group):
- self.apply_ruleset()
+ self.do_refresh_security_group_rules(security_group)
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def do_refresh_security_group_rules(self, security_group):
+ for instance in self.instances.values():
+ self.remove_filters_for_instance(instance)
+ self.add_filters_for_instance(instance)
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)
def _instance_chain_name(self, instance):
- return 'nova-inst-%s' % (instance['id'],)
-
- def _ip_for_instance(self, instance):
- return db.instance_get_fixed_address(context.get_admin_context(),
- instance['id'])
-
- def _ip_for_instance_v6(self, instance):
- return db.instance_get_fixed_address_v6(context.get_admin_context(),
- instance['id'])
-
- def _dhcp_server_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['gateway']
-
- def _ra_server_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['ra_server']
-
- def _project_cidr_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['cidr']
-
- def _project_cidrv6_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['cidr_v6']
+ return 'inst-%s' % (instance['id'],)
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
new file mode 100644
index 000000000..d9b27de08
--- /dev/null
+++ b/nova/virt/vmwareapi/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+:mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API.
+"""
diff --git a/nova/virt/vmwareapi/error_util.py b/nova/virt/vmwareapi/error_util.py
new file mode 100644
index 000000000..53fa8f24d
--- /dev/null
+++ b/nova/virt/vmwareapi/error_util.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception classes and SOAP response error checking module.
+"""
+
+FAULT_NOT_AUTHENTICATED = "NotAuthenticated"
+FAULT_ALREADY_EXISTS = "AlreadyExists"
+
+
+class VimException(Exception):
+ """The VIM Exception class."""
+
+ def __init__(self, exception_summary, excep):
+ Exception.__init__(self)
+ self.exception_summary = exception_summary
+ self.exception_obj = excep
+
+ def __str__(self):
+ return self.exception_summary + str(self.exception_obj)
+
+
+class SessionOverLoadException(VimException):
+ """Session Overload Exception."""
+ pass
+
+
+class VimAttributeError(VimException):
+ """VI Attribute Error."""
+ pass
+
+
+class VimFaultException(Exception):
+ """The VIM Fault exception class."""
+
+ def __init__(self, fault_list, excep):
+ Exception.__init__(self)
+ self.fault_list = fault_list
+ self.exception_obj = excep
+
+ def __str__(self):
+ return str(self.exception_obj)
+
+
+class FaultCheckers(object):
+ """
+ Methods for checking SOAP responses for faults. Per-method error
+ handlers are defined for the calls we want to check. The faults are
+ embedded in the SOAP messages as properties rather than as SOAP faults.
+ """
+
+ @staticmethod
+ def retrieveproperties_fault_checker(resp_obj):
+ """
+ Checks the RetrieveProperties response for errors. Certain faults
+ are sent as part of the SOAP body as a property of missingSet
+ (for example, the NotAuthenticated fault).
+ """
+ fault_list = []
+ if not resp_obj:
+ # This is the case when the session has timed out. ESX SOAP server
+ # sends an empty RetrievePropertiesResponse. Normally missingSet in
+ # the returnval field has the specifics about the error, but that's
+ # not the case with a timed out idle session. It is as bad as a
+ # terminated session for we cannot use the session. So setting
+ # fault to NotAuthenticated fault.
+ fault_list = ["NotAuthenticated"]
+ else:
+ for obj_cont in resp_obj:
+ if hasattr(obj_cont, "missingSet"):
+ for missing_elem in obj_cont.missingSet:
+ fault_type = \
+ missing_elem.fault.fault.__class__.__name__
+ # Fault needs to be added to the type of fault for
+ # uniformity in error checking as SOAP faults define
+ fault_list.append(fault_type)
+ if fault_list:
+ exc_msg_list = ', '.join(fault_list)
+ raise VimFaultException(fault_list, Exception(_("Error(s) %s "
+ "occurred in the call to RetrieveProperties") %
+ exc_msg_list))
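
Callers are expected to catch VimFaultException and inspect fault_list for well-known fault names such as FAULT_NOT_AUTHENTICATED, typically to re-login and retry. A hedged sketch of that pattern (vim_call stands in for any VIM API invocation; it is not a function from this module):

from nova.virt.vmwareapi import error_util

try:
    result = vim_call()                  # placeholder for e.g. a RetrieveProperties call
except error_util.VimFaultException, excep:
    if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
        pass                             # a caller would re-authenticate and retry here
    else:
        raise
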
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
new file mode 100644
index 000000000..4bb467fa9
--- /dev/null
+++ b/nova/virt/vmwareapi/fake.py
@@ -0,0 +1,711 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A fake VMWare VI API implementation.
+"""
+
+from pprint import pformat
+import uuid
+
+from nova import exception
+from nova import log as logging
+from nova.virt.vmwareapi import vim
+from nova.virt.vmwareapi import error_util
+
+_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
+ 'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
+ 'files']
+
+_FAKE_FILE_SIZE = 1024
+
+_db_content = {}
+
+LOG = logging.getLogger("nova.virt.vmwareapi.fake")
+
+
+def log_db_contents(msg=None):
+ """Log DB Contents."""
+ text = msg or ""
+ content = pformat(_db_content)
+ LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
+
+
+def reset():
+ """Resets the db contents."""
+ for c in _CLASSES:
+ # We fake the datastore by keeping the file references as a list of
+ # names in the db
+ if c == 'files':
+ _db_content[c] = []
+ else:
+ _db_content[c] = {}
+ create_network()
+ create_host_network_system()
+ create_host()
+ create_datacenter()
+ create_datastore()
+ create_res_pool()
+
+
+def cleanup():
+ """Clear the db contents."""
+ for c in _CLASSES:
+ _db_content[c] = {}
+
+
+def _create_object(table, table_obj):
+ """Create an object in the db."""
+ _db_content[table][table_obj.obj] = table_obj
+
+
+def _get_objects(obj_type):
+ """Get objects of the type."""
+ lst_objs = []
+ for key in _db_content[obj_type]:
+ lst_objs.append(_db_content[obj_type][key])
+ return lst_objs
+
+
+class Prop(object):
+ """Property Object base class."""
+
+ def __init__(self):
+ self.name = None
+ self.val = None
+
+
+class ManagedObject(object):
+ """Managed Data Object base class."""
+
+ def __init__(self, name="ManagedObject", obj_ref=None):
+ """Sets the obj property which acts as a reference to the object."""
+ super(ManagedObject, self).__setattr__('objName', name)
+ if obj_ref is None:
+ obj_ref = str(uuid.uuid4())
+ object.__setattr__(self, 'obj', obj_ref)
+ object.__setattr__(self, 'propSet', [])
+
+ def set(self, attr, val):
+ """
+ Sets an attribute value. We do not use __setattr__ directly because
+ we want to be able to set attributes with dotted names like 'a.b.c',
+ and this helper lets us do that.
+ """
+ self.__setattr__(attr, val)
+
+ def get(self, attr):
+ """
+ Gets an attribute. Used as an intermediary to get nested
+ property like 'a.b.c' value.
+ """
+ return self.__getattr__(attr)
+
+ def __setattr__(self, attr, val):
+ for prop in self.propSet:
+ if prop.name == attr:
+ prop.val = val
+ return
+ elem = Prop()
+ elem.name = attr
+ elem.val = val
+ self.propSet.append(elem)
+
+ def __getattr__(self, attr):
+ for elem in self.propSet:
+ if elem.name == attr:
+ return elem.val
+ raise exception.Error(_("Property %(attr)s not set for the managed "
+ "object %(objName)s") %
+ {'attr': attr,
+ 'objName': self.objName})
+
+
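
ManagedObject stores every attribute in propSet, so dotted property paths can be written and read back just like on the real VI SDK objects. A short usage sketch (the property names are illustrative):

mo = ManagedObject()
mo.set('summary.config.numCpu', 2)   # stored as a Prop with a dotted name
mo.get('summary.config.numCpu')      # -> 2
mo.get('missing.property')           # raises exception.Error
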
+class DataObject(object):
+ """Data object base class."""
+ pass
+
+
+class VirtualDisk(DataObject):
+ """
+ Virtual Disk class. Does nothing special except setting
+ __class__.__name__ to 'VirtualDisk'. See the places where
+ __class__.__name__ is used in the code.
+ """
+ pass
+
+
+class VirtualDiskFlatVer2BackingInfo(DataObject):
+ """VirtualDiskFlatVer2BackingInfo class."""
+ pass
+
+
+class VirtualLsiLogicController(DataObject):
+ """VirtualLsiLogicController class."""
+ pass
+
+
+class VirtualMachine(ManagedObject):
+ """Virtual Machine class."""
+
+ def __init__(self, **kwargs):
+ super(VirtualMachine, self).__init__("VirtualMachine")
+ self.set("name", kwargs.get("name"))
+ self.set("runtime.connectionState",
+ kwargs.get("conn_state", "connected"))
+ self.set("summary.config.guestId", kwargs.get("guest", "otherGuest"))
+ ds_do = DataObject()
+ ds_do.ManagedObjectReference = [kwargs.get("ds").obj]
+ self.set("datastore", ds_do)
+ self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
+ "toolsOk"))
+ self.set("summary.guest.toolsRunningStatus", kwargs.get(
+ "toolsrunningstate", "guestToolsRunning"))
+ self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
+ self.set("config.files.vmPathName", kwargs.get("vmPathName"))
+ self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
+ self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
+ self.set("config.hardware.device", kwargs.get("virtual_disk", None))
+ self.set("config.extraConfig", kwargs.get("extra_config", None))
+
+ def reconfig(self, factory, val):
+ """
+ Called to reconfigure the VM. Actually customizes the property
+ setting of the Virtual Machine object.
+ """
+ try:
+ # Case of Reconfig of VM to attach disk
+ controller_key = val.deviceChange[1].device.controllerKey
+ filename = val.deviceChange[1].device.backing.fileName
+
+ disk = VirtualDisk()
+ disk.controllerKey = controller_key
+
+ disk_backing = VirtualDiskFlatVer2BackingInfo()
+ disk_backing.fileName = filename
+ disk_backing.key = -101
+ disk.backing = disk_backing
+
+ controller = VirtualLsiLogicController()
+ controller.key = controller_key
+
+ self.set("config.hardware.device", [disk, controller])
+ except AttributeError:
+ # Case of Reconfig of VM to set extra params
+ self.set("config.extraConfig", val.extraConfig)
+
+
+class Network(ManagedObject):
+ """Network class."""
+
+ def __init__(self):
+ super(Network, self).__init__("Network")
+ self.set("summary.name", "vmnet0")
+
+
+class ResourcePool(ManagedObject):
+ """Resource Pool class."""
+
+ def __init__(self):
+ super(ResourcePool, self).__init__("ResourcePool")
+ self.set("name", "ResPool")
+
+
+class Datastore(ManagedObject):
+ """Datastore class."""
+
+ def __init__(self):
+ super(Datastore, self).__init__("Datastore")
+ self.set("summary.type", "VMFS")
+ self.set("summary.name", "fake-ds")
+
+
+class HostNetworkSystem(ManagedObject):
+ """HostNetworkSystem class."""
+
+ def __init__(self):
+ super(HostNetworkSystem, self).__init__("HostNetworkSystem")
+ self.set("name", "networkSystem")
+
+ pnic_do = DataObject()
+ pnic_do.device = "vmnic0"
+
+ net_info_pnic = DataObject()
+ net_info_pnic.PhysicalNic = [pnic_do]
+
+ self.set("networkInfo.pnic", net_info_pnic)
+
+
+class HostSystem(ManagedObject):
+ """Host System class."""
+
+ def __init__(self):
+ super(HostSystem, self).__init__("HostSystem")
+ self.set("name", "ha-host")
+ if _db_content.get("HostNetworkSystem", None) is None:
+ create_host_network_system()
+ host_net_key = _db_content["HostNetworkSystem"].keys()[0]
+ host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
+ self.set("configManager.networkSystem", host_net_sys)
+
+ if _db_content.get("Network", None) is None:
+ create_network()
+ net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
+ network_do = DataObject()
+ network_do.ManagedObjectReference = [net_ref]
+ self.set("network", network_do)
+
+ vswitch_do = DataObject()
+ vswitch_do.pnic = ["vmnic0"]
+ vswitch_do.name = "vSwitch0"
+ vswitch_do.portgroup = ["PortGroup-vmnet0"]
+
+ net_switch = DataObject()
+ net_switch.HostVirtualSwitch = [vswitch_do]
+ self.set("config.network.vswitch", net_switch)
+
+ host_pg_do = DataObject()
+ host_pg_do.key = "PortGroup-vmnet0"
+
+ pg_spec = DataObject()
+ pg_spec.vlanId = 0
+ pg_spec.name = "vmnet0"
+
+ host_pg_do.spec = pg_spec
+
+ host_pg = DataObject()
+ host_pg.HostPortGroup = [host_pg_do]
+ self.set("config.network.portgroup", host_pg)
+
+ def _add_port_group(self, spec):
+ """Adds a port group to the host system object in the db."""
+ pg_name = spec.name
+ vswitch_name = spec.vswitchName
+ vlanid = spec.vlanId
+
+ vswitch_do = DataObject()
+ vswitch_do.pnic = ["vmnic0"]
+ vswitch_do.name = vswitch_name
+ vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
+
+ vswitches = self.get("config.network.vswitch").HostVirtualSwitch
+ vswitches.append(vswitch_do)
+
+ host_pg_do = DataObject()
+ host_pg_do.key = "PortGroup-%s" % pg_name
+
+ pg_spec = DataObject()
+ pg_spec.vlanId = vlanid
+ pg_spec.name = pg_name
+
+ host_pg_do.spec = pg_spec
+ host_pgrps = self.get("config.network.portgroup").HostPortGroup
+ host_pgrps.append(host_pg_do)
+
+
+class Datacenter(ManagedObject):
+ """Datacenter class."""
+
+ def __init__(self):
+ super(Datacenter, self).__init__("Datacenter")
+ self.set("name", "ha-datacenter")
+ self.set("vmFolder", "vm_folder_ref")
+ if _db_content.get("Network", None) is None:
+ create_network()
+ net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
+ network_do = DataObject()
+ network_do.ManagedObjectReference = [net_ref]
+ self.set("network", network_do)
+
+
+class Task(ManagedObject):
+ """Task class."""
+
+ def __init__(self, task_name, state="running"):
+ super(Task, self).__init__("Task")
+ info = DataObject
+ info.name = task_name
+ info.state = state
+ self.set("info", info)
+
+
+def create_host_network_system():
+ host_net_system = HostNetworkSystem()
+ _create_object("HostNetworkSystem", host_net_system)
+
+
+def create_host():
+ host_system = HostSystem()
+ _create_object('HostSystem', host_system)
+
+
+def create_datacenter():
+ data_center = Datacenter()
+ _create_object('Datacenter', data_center)
+
+
+def create_datastore():
+ data_store = Datastore()
+ _create_object('Datastore', data_store)
+
+
+def create_res_pool():
+ res_pool = ResourcePool()
+ _create_object('ResourcePool', res_pool)
+
+
+def create_network():
+ network = Network()
+ _create_object('Network', network)
+
+
+def create_task(task_name, state="running"):
+ task = Task(task_name, state)
+ _create_object("Task", task)
+ return task
+
+
+def _add_file(file_path):
+ """Adds a file reference to the db."""
+ _db_content["files"].append(file_path)
+
+
+def _remove_file(file_path):
+ """Removes a file reference from the db."""
+ if _db_content.get("files") is None:
+ raise exception.NotFound(_("No files have been added yet"))
+ # Check if the remove is for a single file object or for a folder
+ if file_path.find(".vmdk") != -1:
+ if file_path not in _db_content.get("files"):
+ raise exception.NotFound(_("File- '%s' is not there in the "
+ "datastore") % file_path)
+ _db_content.get("files").remove(file_path)
+ else:
+ # Removes the files in the folder and the folder too from the db
+ for file in _db_content.get("files"):
+ if file.find(file_path) != -1:
+ lst_files = _db_content.get("files")
+ if lst_files and lst_files.count(file):
+ lst_files.remove(file)
+
+
+def fake_fetch_image(image, instance, **kwargs):
+ """Fakes fetch image call. Just adds a reference to the db for the file."""
+ ds_name = kwargs.get("datastore_name")
+ file_path = kwargs.get("file_path")
+ ds_file_path = "[" + ds_name + "] " + file_path
+ _add_file(ds_file_path)
+
+
+def fake_upload_image(image, instance, **kwargs):
+ """Fakes the upload of an image."""
+ pass
+
+
+def fake_get_vmdk_size_and_properties(image_id, instance):
+ """Fakes the file size and properties fetch for the image file."""
+ props = {"vmware_ostype": "otherGuest",
+ "vmware_adaptertype": "lsiLogic"}
+ return _FAKE_FILE_SIZE, props
+
+
+def _get_vm_mdo(vm_ref):
+ """Gets the Virtual Machine with the ref from the db."""
+ if _db_content.get("VirtualMachine", None) is None:
+ raise exception.NotFound(_("There is no VM registered"))
+ if vm_ref not in _db_content.get("VirtualMachine"):
+ raise exception.NotFound(_("Virtual Machine with ref %s is not "
+ "there") % vm_ref)
+ return _db_content.get("VirtualMachine")[vm_ref]
+
+
+class FakeFactory(object):
+ """Fake factory class for the suds client."""
+
+ def create(self, obj_name):
+ """Creates a namespace object."""
+ return DataObject()
+
+
+class FakeVim(object):
+ """Fake VIM Class."""
+
+ def __init__(self, protocol="https", host="localhost", trace=None):
+ """
+ Initializes the suds client object and sets the service content
+ and the cookies for the session.
+ """
+ self._session = None
+ self.client = DataObject()
+ self.client.factory = FakeFactory()
+
+ transport = DataObject()
+ transport.cookiejar = "Fake-CookieJar"
+ options = DataObject()
+ options.transport = transport
+
+ self.client.options = options
+
+ service_content = self.client.factory.create('ns0:ServiceContent')
+ service_content.propertyCollector = "PropCollector"
+ service_content.virtualDiskManager = "VirtualDiskManager"
+ service_content.fileManager = "FileManager"
+ service_content.rootFolder = "RootFolder"
+ service_content.sessionManager = "SessionManager"
+ self._service_content = service_content
+
+ def get_service_content(self):
+ return self._service_content
+
+ def __repr__(self):
+ return "Fake VIM Object"
+
+ def __str__(self):
+ return "Fake VIM Object"
+
+ def _login(self):
+ """Logs in and sets the session object in the db."""
+ self._session = str(uuid.uuid4())
+ session = DataObject()
+ session.key = self._session
+ _db_content['session'][self._session] = session
+ return session
+
+ def _logout(self):
+ """Logs out and remove the session object ref from the db."""
+ s = self._session
+ self._session = None
+ if s not in _db_content['session']:
+ raise exception.Error(
+ _("Logging out a session that is invalid or already logged "
+ "out: %s") % s)
+ del _db_content['session'][s]
+
+ def _terminate_session(self, *args, **kwargs):
+ """Terminates a session."""
+ s = kwargs.get("sessionId")[0]
+ if s not in _db_content['session']:
+ return
+ del _db_content['session'][s]
+
+ def _check_session(self):
+ """Checks if the session is active."""
+ if (self._session is None or self._session not in
+ _db_content['session']):
+ LOG.debug(_("Session is faulty"))
+ raise error_util.VimFaultException(
+ [error_util.FAULT_NOT_AUTHENTICATED],
+ _("Session Invalid"))
+
+ def _create_vm(self, method, *args, **kwargs):
+ """Creates and registers a VM object with the Host System."""
+ config_spec = kwargs.get("config")
+ ds = _db_content["Datastore"][_db_content["Datastore"].keys()[0]]
+ vm_dict = {"name": config_spec.name,
+ "ds": ds,
+ "powerstate": "poweredOff",
+ "vmPathName": config_spec.files.vmPathName,
+ "numCpu": config_spec.numCPUs,
+ "mem": config_spec.memoryMB}
+ virtual_machine = VirtualMachine(**vm_dict)
+ _create_object("VirtualMachine", virtual_machine)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _reconfig_vm(self, method, *args, **kwargs):
+ """Reconfigures a VM and sets the properties supplied."""
+ vm_ref = args[0]
+ vm_mdo = _get_vm_mdo(vm_ref)
+ vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _create_copy_disk(self, method, vmdk_file_path):
+ """Creates/copies a vmdk file object in the datastore."""
+ # We need to add/create both .vmdk and .-flat.vmdk files
+ flat_vmdk_file_path = \
+ vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+ _add_file(vmdk_file_path)
+ _add_file(flat_vmdk_file_path)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _snapshot_vm(self, method):
+ """Snapshots a VM. Here we do nothing for faking sake."""
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _delete_disk(self, method, *args, **kwargs):
+ """Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
+ vmdk_file_path = kwargs.get("name")
+ flat_vmdk_file_path = \
+ vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+ _remove_file(vmdk_file_path)
+ _remove_file(flat_vmdk_file_path)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _delete_file(self, method, *args, **kwargs):
+ """Deletes a file from the datastore."""
+ _remove_file(kwargs.get("name"))
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _just_return(self):
+ """Fakes a return."""
+ return
+
+ def _unregister_vm(self, method, *args, **kwargs):
+ """Unregisters a VM from the Host System."""
+ vm_ref = args[0]
+ _get_vm_mdo(vm_ref)
+ del _db_content["VirtualMachine"][vm_ref]
+
+ def _search_ds(self, method, *args, **kwargs):
+ """Searches the datastore for a file."""
+ ds_path = kwargs.get("datastorePath")
+ if _db_content.get("files", None) is None:
+ raise exception.NotFound(_("No files have been added yet"))
+ for file in _db_content.get("files"):
+ if file.find(ds_path) != -1:
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+ task_mdo = create_task(method, "error")
+ return task_mdo.obj
+
+ def _make_dir(self, method, *args, **kwargs):
+ """Creates a directory in the datastore."""
+ ds_path = kwargs.get("name")
+ if _db_content.get("files", None) is None:
+ raise exception.NotFound(_("No files have been added yet"))
+ _db_content["files"].append(ds_path)
+
+ def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
+ """Sets power state for the VM."""
+ if _db_content.get("VirtualMachine", None) is None:
+ raise exception.NotFound(_(" No Virtual Machine has been "
+ "registered yet"))
+ if vm_ref not in _db_content.get("VirtualMachine"):
+ raise exception.NotFound(_("Virtual Machine with ref %s is not "
+ "there") % vm_ref)
+ vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
+ vm_mdo.set("runtime.powerState", pwr_state)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _retrieve_properties(self, method, *args, **kwargs):
+ """Retrieves properties based on the type."""
+ spec_set = kwargs.get("specSet")[0]
+ type = spec_set.propSet[0].type
+ properties = spec_set.propSet[0].pathSet
+ objs = spec_set.objectSet
+ lst_ret_objs = []
+ for obj in objs:
+ try:
+ obj_ref = obj.obj
+ # This means that we are doing a search for the managed
+ # dataobjects of the type in the inventory
+ if obj_ref == "RootFolder":
+ for mdo_ref in _db_content[type]:
+ mdo = _db_content[type][mdo_ref]
+ # Create a temp Managed object which has the same ref
+ # as the parent object and copies just the properties
+ # asked for. We need .obj along with the propSet of
+ # just the properties asked for
+ temp_mdo = ManagedObject(mdo.objName, mdo.obj)
+ for prop in properties:
+ temp_mdo.set(prop, mdo.get(prop))
+ lst_ret_objs.append(temp_mdo)
+ else:
+ if obj_ref in _db_content[type]:
+ mdo = _db_content[type][obj_ref]
+ temp_mdo = ManagedObject(mdo.objName, obj_ref)
+ for prop in properties:
+ temp_mdo.set(prop, mdo.get(prop))
+ lst_ret_objs.append(temp_mdo)
+ except Exception, exc:
+ LOG.exception(exc)
+ continue
+ return lst_ret_objs
+
+ def _add_port_group(self, method, *args, **kwargs):
+ """Adds a port group to the host system."""
+ host_mdo = \
+ _db_content["HostSystem"][_db_content["HostSystem"].keys()[0]]
+ host_mdo._add_port_group(kwargs.get("portgrp"))
+
+ def __getattr__(self, attr_name):
+ if attr_name != "Login":
+ self._check_session()
+ if attr_name == "Login":
+ return lambda *args, **kwargs: self._login()
+ elif attr_name == "Logout":
+ self._logout()
+ elif attr_name == "TerminateSession":
+ return lambda *args, **kwargs: self._terminate_session(
+ *args, **kwargs)
+ elif attr_name == "CreateVM_Task":
+ return lambda *args, **kwargs: self._create_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "ReconfigVM_Task":
+ return lambda *args, **kwargs: self._reconfig_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "CreateVirtualDisk_Task":
+ return lambda *args, **kwargs: self._create_copy_disk(attr_name,
+ kwargs.get("name"))
+ elif attr_name == "DeleteDatastoreFile_Task":
+ return lambda *args, **kwargs: self._delete_file(attr_name,
+ *args, **kwargs)
+ elif attr_name == "PowerOnVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOn")
+ elif attr_name == "PowerOffVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOff")
+ elif attr_name == "RebootGuest":
+ return lambda *args, **kwargs: self._just_return()
+ elif attr_name == "ResetVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOn")
+ elif attr_name == "SuspendVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "suspended")
+ elif attr_name == "CreateSnapshot_Task":
+ return lambda *args, **kwargs: self._snapshot_vm(attr_name)
+ elif attr_name == "CopyVirtualDisk_Task":
+ return lambda *args, **kwargs: self._create_copy_disk(attr_name,
+ kwargs.get("destName"))
+ elif attr_name == "DeleteVirtualDisk_Task":
+ return lambda *args, **kwargs: self._delete_disk(attr_name,
+ *args, **kwargs)
+ elif attr_name == "UnregisterVM":
+ return lambda *args, **kwargs: self._unregister_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "SearchDatastore_Task":
+ return lambda *args, **kwargs: self._search_ds(attr_name,
+ *args, **kwargs)
+ elif attr_name == "MakeDirectory":
+ return lambda *args, **kwargs: self._make_dir(attr_name,
+ *args, **kwargs)
+ elif attr_name == "RetrieveProperties":
+ return lambda *args, **kwargs: self._retrieve_properties(
+ attr_name, *args, **kwargs)
+ elif attr_name == "AcquireCloneTicket":
+ return lambda *args, **kwargs: self._just_return()
+ elif attr_name == "AddPortGroup":
+ return lambda *args, **kwargs: self._add_port_group(attr_name,
+ *args, **kwargs)
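
In tests, FakeVim stands in for the suds client: reset() seeds the fake inventory, Login() opens a fake session, and the *_Task attributes return callables that mutate _db_content. A hedged sketch of typical use (the spec fields mirror what _create_vm() reads; the folder reference and values are arbitrary):

from nova.virt.vmwareapi import fake

fake.reset()                                 # seed host, datastore, network, ...
vim = fake.FakeVim()
session = vim.Login()

spec = vim.client.factory.create('ns0:VirtualMachineConfigSpec')
spec.name = 'test-vm'
spec.files = fake.DataObject()
spec.files.vmPathName = '[fake-ds] test-vm/test-vm.vmx'
spec.numCPUs = 1
spec.memoryMB = 128
task_ref = vim.CreateVM_Task('vm_folder_ref', config=spec)
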
diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py
new file mode 100644
index 000000000..2ec773b7b
--- /dev/null
+++ b/nova/virt/vmwareapi/io_util.py
@@ -0,0 +1,168 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility classes for the time-saving transfer of data from a reader to a
+writer, using a LightQueue as a pipe between them.
+"""
+
+from eventlet import event
+from eventlet import greenthread
+from eventlet.queue import LightQueue
+
+from glance import client
+
+from nova import exception
+from nova import log as logging
+
+LOG = logging.getLogger("nova.virt.vmwareapi.io_util")
+
+IO_THREAD_SLEEP_TIME = .01
+GLANCE_POLL_INTERVAL = 5
+
+
+class ThreadSafePipe(LightQueue):
+ """The pipe to hold the data which the reader writes to and the writer
+ reads from."""
+
+ def __init__(self, maxsize, transfer_size):
+ LightQueue.__init__(self, maxsize)
+ self.transfer_size = transfer_size
+ self.transferred = 0
+
+ def read(self, chunk_size):
+ """Read data from the pipe. Chunksize if ignored for we have ensured
+ that the data chunks written to the pipe by readers is the same as the
+ chunks asked for by the Writer."""
+ if self.transferred < self.transfer_size:
+ data_item = self.get()
+ self.transferred += len(data_item)
+ return data_item
+ else:
+ return ""
+
+ def write(self, data):
+ """Put a data item in the pipe."""
+ self.put(data)
+
+ def close(self):
+ """A place-holder to maintain consistency."""
+ pass
+
+
+class GlanceWriteThread(object):
+ """Ensures that image data is written to in the glance client and that
+ it is in correct ('active')state."""
+
+ def __init__(self, input, glance_client, image_id, image_meta={}):
+ self.input = input
+ self.glance_client = glance_client
+ self.image_id = image_id
+ self.image_meta = image_meta
+ self._running = False
+
+ def start(self):
+ self.done = event.Event()
+
+ def _inner():
+ """Function to do the image data transfer through an update
+ and thereon checks if the state is 'active'."""
+ self.glance_client.update_image(self.image_id,
+ image_meta=self.image_meta,
+ image_data=self.input)
+ self._running = True
+ while self._running:
+ try:
+ image_status = \
+ self.glance_client.get_image_meta(self.image_id).get(
+ "status")
+ if image_status == "active":
+ self.stop()
+ self.done.send(True)
+ # If the state is killed, then raise an exception.
+ elif image_status == "killed":
+ self.stop()
+ exc_msg = _("Glance image %s is in killed state") %\
+ self.image_id
+ LOG.exception(exc_msg)
+ self.done.send_exception(exception.Error(exc_msg))
+ elif image_status in ["saving", "queued"]:
+ greenthread.sleep(GLANCE_POLL_INTERVAL)
+ else:
+ self.stop()
+ exc_msg = _("Glance image "
+ "%(image_id)s is in unknown state "
+ "- %(state)s") % {
+ "image_id": self.image_id,
+ "state": image_status}
+ LOG.exception(exc_msg)
+ self.done.send_exception(exception.Error(exc_msg))
+ except Exception, exc:
+ self.stop()
+ self.done.send_exception(exc)
+
+ greenthread.spawn(_inner)
+ return self.done
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
+
+ def close(self):
+ pass
+
+
+class IOThread(object):
+ """Class that reads chunks from the input file and writes them to the
+ output file till the transfer is completely done."""
+
+ def __init__(self, input, output):
+ self.input = input
+ self.output = output
+ self._running = False
+ self.got_exception = False
+
+ def start(self):
+ self.done = event.Event()
+
+ def _inner():
+ """Read data from the input and write the same to the output
+ until the transfer completes."""
+ self._running = True
+ while self._running:
+ try:
+ data = self.input.read(None)
+ if not data:
+ self.stop()
+ self.done.send(True)
+ self.output.write(data)
+ greenthread.sleep(IO_THREAD_SLEEP_TIME)
+ except Exception, exc:
+ self.stop()
+ LOG.exception(exc)
+ self.done.send_exception(exc)
+
+ greenthread.spawn(_inner)
+ return self.done
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
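
These helpers are meant to be wired together: an IOThread fills a ThreadSafePipe from a source handle while a GlanceWriteThread (or a second IOThread) drains it, and the caller waits on the events returned by start(). A minimal hedged sketch; the handles, glance client, image id and size are placeholders, not objects defined in this module:

# Hypothetical wiring of the classes above.
pipe = ThreadSafePipe(maxsize=10, transfer_size=image_size_in_bytes)
reader = IOThread(input=source_read_handle, output=pipe)       # fills the pipe
writer = GlanceWriteThread(input=pipe, glance_client=glance_client,
                           image_id=image_id)                  # drains it into glance
reader_done = reader.start()
writer_done = writer.start()
reader_done.wait()
writer_done.wait()
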
diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_utils.py
new file mode 100644
index 000000000..e77842535
--- /dev/null
+++ b/nova/virt/vmwareapi/network_utils.py
@@ -0,0 +1,149 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility functions for ESX Networking.
+"""
+
+from nova import exception
+from nova import log as logging
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+
+LOG = logging.getLogger("nova.virt.vmwareapi.network_utils")
+
+
+def get_network_with_the_name(session, network_name="vmnet0"):
+ """
+ Gets reference to the network whose name is passed as the
+ argument.
+ """
+ hostsystems = session._call_method(vim_util, "get_objects",
+ "HostSystem", ["network"])
+ vm_networks_ret = hostsystems[0].propSet[0].val
+ # Meaning there are no networks on the host. suds responds with a ""
+ # in the parent property field rather than a [] in the
+ # ManagedObjectReference property field of the parent.
+ if not vm_networks_ret:
+ return None
+ vm_networks = vm_networks_ret.ManagedObjectReference
+ networks = session._call_method(vim_util,
+ "get_properties_for_a_collection_of_objects",
+ "Network", vm_networks, ["summary.name"])
+ for network in networks:
+ if network.propSet[0].val == network_name:
+ return network.obj
+ return None
+
+
+def get_vswitch_for_vlan_interface(session, vlan_interface):
+ """
+ Gets the vswitch associated with the physical network adapter
+ with the name supplied.
+ """
+ # Get the list of vSwitches on the Host System
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ vswitches_ret = session._call_method(vim_util,
+ "get_dynamic_property", host_mor,
+ "HostSystem", "config.network.vswitch")
+ # Meaning there are no vSwitches on the host. This shouldn't happen,
+ # but check defensively.
+ if not vswitches_ret:
+ return
+ vswitches = vswitches_ret.HostVirtualSwitch
+ # Get the vSwitch associated with the network adapter
+ for elem in vswitches:
+ try:
+ for nic_elem in elem.pnic:
+ if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
+ return elem.name
+ # Catching Attribute error as a vSwitch may not be associated with a
+ # physical NIC.
+ except AttributeError:
+ pass
+
+
+def check_if_vlan_interface_exists(session, vlan_interface):
+ """Checks if the vlan_inteface exists on the esx host."""
+ host_net_system_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem", ["configManager.networkSystem"])[0].propSet[0].val
+ physical_nics_ret = session._call_method(vim_util,
+ "get_dynamic_property", host_net_system_mor,
+ "HostNetworkSystem", "networkInfo.pnic")
+ # Meaning there are no physical nics on the host
+ if not physical_nics_ret:
+ return False
+ physical_nics = physical_nics_ret.PhysicalNic
+ for pnic in physical_nics:
+ if vlan_interface == pnic.device:
+ return True
+ return False
+
+
+def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
+ """Get the vlan id and vswicth associated with the port group."""
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ port_grps_on_host_ret = session._call_method(vim_util,
+ "get_dynamic_property", host_mor,
+ "HostSystem", "config.network.portgroup")
+ if not port_grps_on_host_ret:
+ excep = ("ESX SOAP server returned an empty port group "
+ "for the host system in its response")
+ LOG.exception(excep)
+ raise exception.Error(_(excep))
+ port_grps_on_host = port_grps_on_host_ret.HostPortGroup
+ for p_gp in port_grps_on_host:
+ if p_gp.spec.name == pg_name:
+ p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
+ return p_gp.spec.vlanId, p_grp_vswitch_name
+
+
+def create_port_group(session, pg_name, vswitch_name, vlan_id=0):
+ """
+ Creates a port group on the host system with the vlan tags
+ supplied. VLAN id 0 means no vlan id association.
+ """
+ client_factory = session._get_vim().client.factory
+ add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
+ client_factory,
+ vswitch_name,
+ pg_name,
+ vlan_id)
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ network_system_mor = session._call_method(vim_util,
+ "get_dynamic_property", host_mor,
+ "HostSystem", "configManager.networkSystem")
+ LOG.debug(_("Creating Port Group with name %s on "
+ "the ESX host") % pg_name)
+ try:
+ session._call_method(session._get_vim(),
+ "AddPortGroup", network_system_mor,
+ portgrp=add_prt_grp_spec)
+ except error_util.VimFaultException, exc:
+ # There can be a race condition when two instances try
+ # adding port groups at the same time. One succeeds, then
+ # the other one will get an exception. Since we are
+ # concerned with the port group being created, which is done
+ # by the other call, we can ignore the exception.
+ if error_util.FAULT_ALREADY_EXISTS not in exc.fault_list:
+ raise exception.Error(exc)
+ LOG.debug(_("Created Port Group with name %s on "
+ "the ESX host") % pg_name)
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
new file mode 100644
index 000000000..84f4942eb
--- /dev/null
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -0,0 +1,182 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Classes to handle image files
+
+Collection of classes to handle image upload/download to/from Image service
+(like Glance image storage and retrieval service) from/to ESX/ESXi server.
+
+"""
+
+import httplib
+import urllib
+import urllib2
+import urlparse
+
+from eventlet import event
+from eventlet import greenthread
+
+from glance import client
+
+from nova import flags
+from nova import log as logging
+
+LOG = logging.getLogger("nova.virt.vmwareapi.read_write_util")
+
+FLAGS = flags.FLAGS
+
+USER_AGENT = "OpenStack-ESX-Adapter"
+
+try:
+ READ_CHUNKSIZE = client.BaseClient.CHUNKSIZE
+except AttributeError:
+ READ_CHUNKSIZE = 65536
+
+
+class GlanceFileRead(object):
+ """Glance file read handler class."""
+
+ def __init__(self, glance_read_iter):
+ self.glance_read_iter = glance_read_iter
+ self.iter = self.get_next()
+
+ def read(self, chunk_size):
+ """Read an item from the queue. The chunk size is ignored for the
+ Client ImageBodyIterator uses its own CHUNKSIZE."""
+ try:
+ return self.iter.next()
+ except StopIteration:
+ return ""
+
+ def get_next(self):
+ """Get the next item from the image iterator."""
+ for data in self.glance_read_iter:
+ yield data
+
+ def close(self):
+ """A dummy close just to maintain consistency."""
+ pass
+
+
+class VMwareHTTPFile(object):
+ """Base class for HTTP file."""
+
+ def __init__(self, file_handle):
+ self.eof = False
+ self.file_handle = file_handle
+
+ def set_eof(self, eof):
+ """Set the end of file marker."""
+ self.eof = eof
+
+ def get_eof(self):
+ """Check if the end of file has been reached."""
+ return self.eof
+
+ def close(self):
+ """Close the file handle."""
+ try:
+ self.file_handle.close()
+ except Exception, exc:
+ LOG.exception(exc)
+
+ def __del__(self):
+ """Close the file handle on garbage collection."""
+ self.close()
+
+ def _build_vim_cookie_headers(self, vim_cookies):
+ """Build ESX host session cookie headers."""
+ cookie_header = ""
+ for vim_cookie in vim_cookies:
+ cookie_header = vim_cookie.name + "=" + vim_cookie.value
+ break
+ return cookie_header
+
+ def write(self, data):
+ """Write data to the file."""
+ raise NotImplementedError
+
+ def read(self, chunk_size):
+ """Read a chunk of data."""
+ raise NotImplementedError
+
+ def get_size(self):
+ """Get size of the file to be read."""
+ raise NotImplementedError
+
+
+class VMWareHTTPWriteFile(VMwareHTTPFile):
+ """VMWare file write handler class."""
+
+ def __init__(self, host, data_center_name, datastore_name, cookies,
+ file_path, file_size, scheme="https"):
+ base_url = "%s://%s/folder/%s" % (scheme, host, file_path)
+ param_list = {"dcPath": data_center_name, "dsName": datastore_name}
+ base_url = base_url + "?" + urllib.urlencode(param_list)
+ (scheme, netloc, path, params, query, fragment) = \
+ urlparse.urlparse(base_url)
+ if scheme == "http":
+ conn = httplib.HTTPConnection(netloc)
+ elif scheme == "https":
+ conn = httplib.HTTPSConnection(netloc)
+ conn.putrequest("PUT", path + "?" + query)
+ conn.putheader("User-Agent", USER_AGENT)
+ conn.putheader("Content-Length", file_size)
+ conn.putheader("Cookie", self._build_vim_cookie_headers(cookies))
+ conn.endheaders()
+ self.conn = conn
+ VMwareHTTPFile.__init__(self, conn)
+
+ def write(self, data):
+ """Write to the file."""
+ self.file_handle.send(data)
+
+ def close(self):
+ """Get the response and close the connection."""
+ try:
+ self.conn.getresponse()
+ except Exception, excep:
+ LOG.debug(_("Exception during HTTP connection close in "
+ "VMWareHTTpWrite. Exception is %s") % excep)
+ super(VMWareHTTPWriteFile, self).close()
+
+
+class VmWareHTTPReadFile(VMwareHTTPFile):
+ """VMWare file read handler class."""
+
+ def __init__(self, host, data_center_name, datastore_name, cookies,
+ file_path, scheme="https"):
+ base_url = "%s://%s/folder/%s" % (scheme, host,
+ urllib.pathname2url(file_path))
+ param_list = {"dcPath": data_center_name, "dsName": datastore_name}
+ base_url = base_url + "?" + urllib.urlencode(param_list)
+ headers = {'User-Agent': USER_AGENT,
+ 'Cookie': self._build_vim_cookie_headers(cookies)}
+ request = urllib2.Request(base_url, None, headers)
+ conn = urllib2.urlopen(request)
+ VMwareHTTPFile.__init__(self, conn)
+
+ def read(self, chunk_size):
+ """Read a chunk of data."""
+ # Ignore the chunk size passed in; the pipe should hold data items
+ # of the chunk size that the Glance client uses for reads while
+ # writing.
+ return self.file_handle.read(READ_CHUNKSIZE)
+
+ def get_size(self):
+ """Get size of the file to be read."""
+ return self.file_handle.headers.get("Content-Length", -1)
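
GlanceFileRead adapts a chunk iterator to the file-like read() interface that the transfer code expects, returning "" at end of stream. A small self-contained sketch of that adapter pattern follows; IteratorFile is an invented name and is not part of the patch.

class IteratorFile(object):
    """File-like wrapper over an iterator of chunks, mirroring GlanceFileRead."""
    def __init__(self, chunk_iter):
        self._iter = iter(chunk_iter)

    def read(self, chunk_size=None):
        # The chunk size is ignored; the iterator decides the chunk size.
        try:
            return next(self._iter)
        except StopIteration:
            return ""


reader = IteratorFile(["abc", "def"])
assert reader.read() == "abc"
assert reader.read() == "def"
assert reader.read() == ""        # end of stream, as the IO loop expects
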
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
new file mode 100644
index 000000000..159e16a80
--- /dev/null
+++ b/nova/virt/vmwareapi/vim.py
@@ -0,0 +1,180 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Classes for making VMware VI SOAP calls.
+"""
+
+import httplib
+
+try:
+ import suds
+except ImportError:
+ suds = None
+
+from nova import flags
+from nova.virt.vmwareapi import error_util
+
+RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
+CONN_ABORT_ERROR = 'Software caused connection abort'
+ADDRESS_IN_USE_ERROR = 'Address already in use'
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vmwareapi_wsdl_loc',
+ None,
+ 'VIM Service WSDL Location, '
+ 'e.g. http://<server>/vimService.wsdl. '
+ 'Required due to a bug in the vSphere ESX 4.1 default wsdl. '
+ 'Refer to readme-vmware for setup.')
+
+
+if suds:
+ class VIMMessagePlugin(suds.plugin.MessagePlugin):
+
+ def addAttributeForValue(self, node):
+ # suds does not handle AnyType properly.
+ # VI SDK requires type attribute to be set when AnyType is used
+ if node.name == 'value':
+ node.set('xsi:type', 'xsd:string')
+
+ def marshalled(self, context):
+ """suds will send the specified soap envelope.
+ Provides the plugin with the opportunity to prune empty
+ nodes and fixup nodes before sending it to the server.
+ """
+ # suds builds the entire request object based on the wsdl schema.
+ # VI SDK throws server errors if optional SOAP nodes are sent
+ # without values, e.g. <test/> as opposed to <test>test</test>
+ context.envelope.prune()
+ context.envelope.walk(self.addAttributeForValue)
+
+
+class Vim:
+ """The VIM Object."""
+
+ def __init__(self,
+ protocol="https",
+ host="localhost"):
+ """
+ Creates the necessary Communication interfaces and gets the
+ ServiceContent for initiating SOAP transactions.
+
+ protocol: http or https
+ host : ESX IPAddress[:port] or ESX Hostname[:port]
+ """
+ if not suds:
+ raise Exception(_("Unable to import suds."))
+
+ self._protocol = protocol
+ self._host_name = host
+ wsdl_url = FLAGS.vmwareapi_wsdl_loc
+ if wsdl_url is None:
+ raise Exception(_("Must specify vmwareapi_wsdl_loc"))
+ # TODO(sateesh): Use this when VMware fixes their faulty wsdl
+ #wsdl_url = '%s://%s/sdk/vimService.wsdl' % (self._protocol,
+ # self._host_name)
+ url = '%s://%s/sdk' % (self._protocol, self._host_name)
+ self.client = suds.client.Client(wsdl_url, location=url,
+ plugins=[VIMMessagePlugin()])
+ self._service_content = \
+ self.RetrieveServiceContent("ServiceInstance")
+
+ def get_service_content(self):
+ """Gets the service content object."""
+ return self._service_content
+
+ def __getattr__(self, attr_name):
+ """Makes the API calls and gets the result."""
+ try:
+ return object.__getattr__(self, attr_name)
+ except AttributeError:
+
+ def vim_request_handler(managed_object, **kwargs):
+ """
+ Builds the SOAP message and parses the response for fault
+ checking and other errors.
+
+ managed_object : Managed Object Reference or Managed
+ Object Name
+ **kwargs : Keyword arguments of the call
+ """
+ # Dynamic handler for VI SDK Calls
+ try:
+ request_mo = \
+ self._request_managed_object_builder(managed_object)
+ request = getattr(self.client.service, attr_name)
+ response = request(request_mo, **kwargs)
+ # To check for the faults that are part of the message body
+ # and not returned as Fault object response from the ESX
+ # SOAP server
+ if hasattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker"):
+ fault_checker = getattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker")
+ fault_checker(response)
+ return response
+ # Catch the VimFaultException that is raised by the fault
+ # check of the SOAP response
+ except error_util.VimFaultException, excep:
+ raise
+ except suds.WebFault, excep:
+ doc = excep.document
+ detail = doc.childAtPath("/Envelope/Body/Fault/detail")
+ fault_list = []
+ for child in detail.getChildren():
+ fault_list.append(child.get("type"))
+ raise error_util.VimFaultException(fault_list, excep)
+ except AttributeError, excep:
+ raise error_util.VimAttributeError(_("No such SOAP method "
+ "'%s' provided by VI SDK") % (attr_name), excep)
+ except (httplib.CannotSendRequest,
+ httplib.ResponseNotReady,
+ httplib.CannotSendHeader), excep:
+ raise error_util.SessionOverLoadException(_("httplib "
+ "error in %s: ") % (attr_name), excep)
+ except Exception, excep:
+ # Socket errors that need special handling because they
+ # might be caused by ESX API call overload
+ if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
+ str(excep).find(CONN_ABORT_ERROR) != -1):
+ raise error_util.SessionOverLoadException(_("Socket "
+ "error in %s: ") % (attr_name), excep)
+ # Type error that needs special handling because it might be
+ # caused by ESX host API call overload
+ elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
+ raise error_util.SessionOverLoadException(_("Type "
+ "error in %s: ") % (attr_name), excep)
+ else:
+ raise error_util.VimException(
+ _("Exception in %s ") % (attr_name), excep)
+ return vim_request_handler
+
+ def _request_managed_object_builder(self, managed_object):
+ """Builds the request managed object."""
+ # Request Managed Object Builder
+ if type(managed_object) == type(""):
+ mo = suds.sudsobject.Property(managed_object)
+ mo._type = managed_object
+ else:
+ mo = managed_object
+ return mo
+
+ def __repr__(self):
+ return "VIM Object"
+
+ def __str__(self):
+ return "VIM Object"
diff --git a/nova/virt/vmwareapi/vim_util.py b/nova/virt/vmwareapi/vim_util.py
new file mode 100644
index 000000000..11214231c
--- /dev/null
+++ b/nova/virt/vmwareapi/vim_util.py
@@ -0,0 +1,217 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The VMware API utility module.
+"""
+
+
+def build_selection_spec(client_factory, name):
+ """Builds the selection spec."""
+ sel_spec = client_factory.create('ns0:SelectionSpec')
+ sel_spec.name = name
+ return sel_spec
+
+
+def build_traversal_spec(client_factory, name, spec_type, path, skip,
+ select_set):
+ """Builds the traversal spec object."""
+ traversal_spec = client_factory.create('ns0:TraversalSpec')
+ traversal_spec.name = name
+ traversal_spec.type = spec_type
+ traversal_spec.path = path
+ traversal_spec.skip = skip
+ traversal_spec.selectSet = select_set
+ return traversal_spec
+
+
+def build_recursive_traversal_spec(client_factory):
+ """
+ Builds the Recursive Traversal Spec to traverse the managed
+ object hierarchy.
+ """
+ visit_folders_select_spec = build_selection_spec(client_factory,
+ "visitFolders")
+ # For getting to hostFolder from datacenter
+ dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
+ "hostFolder", False,
+ [visit_folders_select_spec])
+ # For getting to vmFolder from datacenter
+ dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
+ "vmFolder", False,
+ [visit_folders_select_spec])
+ # For getting Host System to virtual machine
+ h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
+ "vm", False,
+ [visit_folders_select_spec])
+
+ # For getting to Host System from Compute Resource
+ cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
+ "ComputeResource", "host", False, [])
+
+ # For getting to datastore from Compute Resource
+ cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
+ "ComputeResource", "datastore", False, [])
+
+ rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
+ rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
+ # For getting to resource pool from Compute Resource
+ cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
+ "ComputeResource", "resourcePool", False,
+ [rp_to_rp_select_spec, rp_to_vm_select_spec])
+
+ # For getting to child res pool from the parent res pool
+ rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
+ "resourcePool", False,
+ [rp_to_rp_select_spec, rp_to_vm_select_spec])
+
+ # For getting to Virtual Machine from the Resource Pool
+ rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
+ "vm", False,
+ [rp_to_rp_select_spec, rp_to_vm_select_spec])
+
+ # Get the assorted traversal spec which takes care of the objects to
+ # be searched for from the root folder
+ traversal_spec = build_traversal_spec(client_factory, "visitFolders",
+ "Folder", "childEntity", False,
+ [visit_folders_select_spec, dc_to_hf,
+ dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,
+ rp_to_rp, h_to_vm, rp_to_vm])
+ return traversal_spec
+
+
+def build_property_spec(client_factory, type="VirtualMachine",
+ properties_to_collect=["name"],
+ all_properties=False):
+ """Builds the Property Spec."""
+ property_spec = client_factory.create('ns0:PropertySpec')
+ property_spec.all = all_properties
+ property_spec.pathSet = properties_to_collect
+ property_spec.type = type
+ return property_spec
+
+
+def build_object_spec(client_factory, root_folder, traversal_specs):
+ """Builds the object Spec."""
+ object_spec = client_factory.create('ns0:ObjectSpec')
+ object_spec.obj = root_folder
+ object_spec.skip = False
+ object_spec.selectSet = traversal_specs
+ return object_spec
+
+
+def build_property_filter_spec(client_factory, property_specs, object_specs):
+ """Builds the Property Filter Spec."""
+ property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
+ property_filter_spec.propSet = property_specs
+ property_filter_spec.objectSet = object_specs
+ return property_filter_spec
+
+
+def get_object_properties(vim, collector, mobj, type, properties):
+ """Gets the properties of the Managed object specified."""
+ client_factory = vim.client.factory
+ if mobj is None:
+ return None
+ usecoll = collector
+ if usecoll is None:
+ usecoll = vim.get_service_content().propertyCollector
+ property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
+ property_spec = client_factory.create('ns0:PropertySpec')
+ property_spec.all = (properties is None or len(properties) == 0)
+ property_spec.pathSet = properties
+ property_spec.type = type
+ object_spec = client_factory.create('ns0:ObjectSpec')
+ object_spec.obj = mobj
+ object_spec.skip = False
+ property_filter_spec.propSet = [property_spec]
+ property_filter_spec.objectSet = [object_spec]
+ return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])
+
+
+def get_dynamic_property(vim, mobj, type, property_name):
+ """Gets a particular property of the Managed Object."""
+ obj_content = \
+ get_object_properties(vim, None, mobj, type, [property_name])
+ property_value = None
+ if obj_content:
+ dynamic_property = obj_content[0].propSet
+ if dynamic_property:
+ property_value = dynamic_property[0].val
+ return property_value
+
+
+def get_objects(vim, type, properties_to_collect=["name"], all=False):
+ """Gets the list of objects of the type specified."""
+ client_factory = vim.client.factory
+ object_spec = build_object_spec(client_factory,
+ vim.get_service_content().rootFolder,
+ [build_recursive_traversal_spec(client_factory)])
+ property_spec = build_property_spec(client_factory, type=type,
+ properties_to_collect=properties_to_collect,
+ all_properties=all)
+ property_filter_spec = build_property_filter_spec(client_factory,
+ [property_spec],
+ [object_spec])
+ return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
+ specSet=[property_filter_spec])
+
+
+def get_prop_spec(client_factory, spec_type, properties):
+ """Builds the Property Spec Object."""
+ prop_spec = client_factory.create('ns0:PropertySpec')
+ prop_spec.type = spec_type
+ prop_spec.pathSet = properties
+ return prop_spec
+
+
+def get_obj_spec(client_factory, obj, select_set=None):
+ """Builds the Object Spec object."""
+ obj_spec = client_factory.create('ns0:ObjectSpec')
+ obj_spec.obj = obj
+ obj_spec.skip = False
+ if select_set is not None:
+ obj_spec.selectSet = select_set
+ return obj_spec
+
+
+def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
+ """Builds the Property Filter Spec Object."""
+ prop_filter_spec = \
+ client_factory.create('ns0:PropertyFilterSpec')
+ prop_filter_spec.propSet = prop_spec
+ prop_filter_spec.objectSet = obj_spec
+ return prop_filter_spec
+
+
+def get_properties_for_a_collection_of_objects(vim, type,
+ obj_list, properties):
+ """
+ Gets the list of properties for the collection of
+ objects of the type specified.
+ """
+ client_factory = vim.client.factory
+ if len(obj_list) == 0:
+ return []
+ prop_spec = get_prop_spec(client_factory, type, properties)
+ lst_obj_specs = []
+ for obj in obj_list:
+ lst_obj_specs.append(get_obj_spec(client_factory, obj))
+ prop_filter_spec = get_prop_filter_spec(client_factory,
+ lst_obj_specs, [prop_spec])
+ return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
+ specSet=[prop_filter_spec])
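
The builders above only create objects from the suds client factory and set attributes on them, so they can be exercised with a trivial stub factory. A hedged sketch follows, assuming this tree is on the PYTHONPATH so nova.virt.vmwareapi.vim_util imports; StubFactory is invented for the example and is not part of the patch.

from nova.virt.vmwareapi import vim_util


class StubFactory(object):
    """Stand-in for vim.client.factory: create() returns a bare object."""
    class _Spec(object):
        pass

    def create(self, type_name):
        spec = self._Spec()
        spec._created_as = type_name
        return spec


factory = StubFactory()
prop_spec = vim_util.build_property_spec(factory, type="VirtualMachine",
                                         properties_to_collect=["name"])
obj_spec = vim_util.build_object_spec(factory, "root-folder-mor", [])
filter_spec = vim_util.build_property_filter_spec(factory, [prop_spec],
                                                  [obj_spec])
assert filter_spec.propSet[0].type == "VirtualMachine"
assert filter_spec.objectSet[0].obj == "root-folder-mor"
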
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
new file mode 100644
index 000000000..a2fa7600c
--- /dev/null
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -0,0 +1,306 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The VMware API VM utility module to build SOAP object specs.
+"""
+
+
+def build_datastore_path(datastore_name, path):
+ """Build the datastore compliant path."""
+ return "[%s] %s" % (datastore_name, path)
+
+
+def split_datastore_path(datastore_path):
+ """
+ Split the VMWare style datastore path to get the Datastore
+ name and the entity path.
+ """
+ spl = datastore_path.split('[', 1)[1].split(']', 1)
+ path = ""
+ if len(spl) == 1:
+ datastore_url = spl[0]
+ else:
+ datastore_url, path = spl
+ return datastore_url, path.strip()
+
+
+def get_vm_create_spec(client_factory, instance, data_store_name,
+ network_name="vmnet0",
+ os_type="otherGuest"):
+ """Builds the VM Create spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+ config_spec.name = instance.name
+ config_spec.guestId = os_type
+
+ vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
+ vm_file_info.vmPathName = "[" + data_store_name + "]"
+ config_spec.files = vm_file_info
+
+ tools_info = client_factory.create('ns0:ToolsConfigInfo')
+ tools_info.afterPowerOn = True
+ tools_info.afterResume = True
+ tools_info.beforeGuestStandby = True
+ tools_info.beforeGuestShutdown = True
+ tools_info.beforeGuestReboot = True
+
+ config_spec.tools = tools_info
+ config_spec.numCPUs = int(instance.vcpus)
+ config_spec.memoryMB = int(instance.memory_mb)
+
+ nic_spec = create_network_spec(client_factory,
+ network_name, instance.mac_address)
+
+ device_config_spec = [nic_spec]
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def create_controller_spec(client_factory, key):
+ """
+ Builds a Config Spec for the LSI Logic Controller's addition
+ which acts as the controller for the virtual hard disk to be attached
+ to the VM.
+ """
+ # Create a controller for the Virtual Hard Disk
+ virtual_device_config = \
+ client_factory.create('ns0:VirtualDeviceConfigSpec')
+ virtual_device_config.operation = "add"
+ virtual_lsi = \
+ client_factory.create('ns0:VirtualLsiLogicController')
+ virtual_lsi.key = key
+ virtual_lsi.busNumber = 0
+ virtual_lsi.sharedBus = "noSharing"
+ virtual_device_config.device = virtual_lsi
+ return virtual_device_config
+
+
+def create_network_spec(client_factory, network_name, mac_address):
+ """
+ Builds a config spec for the addition of a new network
+ adapter to the VM.
+ """
+ network_spec = \
+ client_factory.create('ns0:VirtualDeviceConfigSpec')
+ network_spec.operation = "add"
+
+ # Get the recommended card type for the VM based on the guest OS of the VM
+ net_device = client_factory.create('ns0:VirtualPCNet32')
+
+ backing = \
+ client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
+ backing.deviceName = network_name
+
+ connectable_spec = \
+ client_factory.create('ns0:VirtualDeviceConnectInfo')
+ connectable_spec.startConnected = True
+ connectable_spec.allowGuestControl = True
+ connectable_spec.connected = True
+
+ net_device.connectable = connectable_spec
+ net_device.backing = backing
+
+ # The server assigns a key to the device. Here we pass a temporary
+ # negative key: actual keys are positive numbers, so a negative value
+ # cannot clash with the key that the server associates with the device.
+ net_device.key = -47
+ net_device.addressType = "manual"
+ net_device.macAddress = mac_address
+ net_device.wakeOnLanEnabled = True
+
+ network_spec.device = net_device
+ return network_spec
+
+
+def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
+ adapter_type="lsiLogic"):
+ """Builds the vmdk attach config spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ # The controller Key pertains to the Key of the LSI Logic Controller, which
+ # controls this Hard Disk
+ device_config_spec = []
+ # For IDE devices, there are these two default controllers created in the
+ # VM having keys 200 and 201
+ if adapter_type == "ide":
+ controller_key = 200
+ else:
+ controller_key = -101
+ controller_spec = create_controller_spec(client_factory,
+ controller_key)
+ device_config_spec.append(controller_spec)
+ virtual_device_config_spec = create_virtual_disk_spec(client_factory,
+ disksize, controller_key, file_path)
+
+ device_config_spec.append(virtual_device_config_spec)
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices):
+ """Gets the vmdk file path and the storage adapter type."""
+ if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
+ hardware_devices = hardware_devices.VirtualDevice
+ vmdk_file_path = None
+ vmdk_controler_key = None
+
+ adapter_type_dict = {}
+ for device in hardware_devices:
+ if device.__class__.__name__ == "VirtualDisk" and \
+ device.backing.__class__.__name__ \
+ == "VirtualDiskFlatVer2BackingInfo":
+ vmdk_file_path = device.backing.fileName
+ vmdk_controler_key = device.controllerKey
+ elif device.__class__.__name__ == "VirtualLsiLogicController":
+ adapter_type_dict[device.key] = "lsiLogic"
+ elif device.__class__.__name__ == "VirtualBusLogicController":
+ adapter_type_dict[device.key] = "busLogic"
+ elif device.__class__.__name__ == "VirtualIDEController":
+ adapter_type_dict[device.key] = "ide"
+ elif device.__class__.__name__ == "VirtualLsiLogicSASController":
+ adapter_type_dict[device.key] = "lsiLogic"
+
+ adapter_type = adapter_type_dict.get(vmdk_controler_key, "")
+
+ return vmdk_file_path, adapter_type
+
+
+def get_copy_virtual_disk_spec(client_factory, adapter_type="lsilogic"):
+ """Builds the Virtual Disk copy spec."""
+ dest_spec = client_factory.create('ns0:VirtualDiskSpec')
+ dest_spec.adapterType = adapter_type
+ dest_spec.diskType = "thick"
+ return dest_spec
+
+
+def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic"):
+ """Builds the virtual disk create spec."""
+ create_vmdk_spec = \
+ client_factory.create('ns0:FileBackedVirtualDiskSpec')
+ create_vmdk_spec.adapterType = adapter_type
+ create_vmdk_spec.diskType = "thick"
+ create_vmdk_spec.capacityKb = size_in_kb
+ return create_vmdk_spec
+
+
+def create_virtual_disk_spec(client_factory, disksize, controller_key,
+ file_path=None):
+ """
+ Builds the spec for creating a new virtual disk, or attaching an
+ already existing one, to the VM.
+ """
+ virtual_device_config = \
+ client_factory.create('ns0:VirtualDeviceConfigSpec')
+ virtual_device_config.operation = "add"
+ if file_path is None:
+ virtual_device_config.fileOperation = "create"
+
+ virtual_disk = client_factory.create('ns0:VirtualDisk')
+
+ disk_file_backing = \
+ client_factory.create('ns0:VirtualDiskFlatVer2BackingInfo')
+ disk_file_backing.diskMode = "persistent"
+ disk_file_backing.thinProvisioned = False
+ if file_path is not None:
+ disk_file_backing.fileName = file_path
+ else:
+ disk_file_backing.fileName = ""
+
+ connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
+ connectable_spec.startConnected = True
+ connectable_spec.allowGuestControl = False
+ connectable_spec.connected = True
+
+ virtual_disk.backing = disk_file_backing
+ virtual_disk.connectable = connectable_spec
+
+ # The server assigns a key to the device. Here we pass a random
+ # negative key: actual keys are positive numbers, so a negative value
+ # cannot clash with the key that the server associates with the device.
+ virtual_disk.key = -100
+ virtual_disk.controllerKey = controller_key
+ virtual_disk.unitNumber = 0
+ virtual_disk.capacityInKB = disksize
+
+ virtual_device_config.device = virtual_disk
+
+ return virtual_device_config
+
+
+def get_dummy_vm_create_spec(client_factory, name, data_store_name):
+ """Builds the dummy VM create spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ config_spec.name = name
+ config_spec.guestId = "otherGuest"
+
+ vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
+ vm_file_info.vmPathName = "[" + data_store_name + "]"
+ config_spec.files = vm_file_info
+
+ tools_info = client_factory.create('ns0:ToolsConfigInfo')
+ tools_info.afterPowerOn = True
+ tools_info.afterResume = True
+ tools_info.beforeGuestStandby = True
+ tools_info.beforeGuestShutdown = True
+ tools_info.beforeGuestReboot = True
+
+ config_spec.tools = tools_info
+ config_spec.numCPUs = 1
+ config_spec.memoryMB = 4
+
+ controller_key = -101
+ controller_spec = create_controller_spec(client_factory, controller_key)
+ disk_spec = create_virtual_disk_spec(client_factory, 1024, controller_key)
+
+ device_config_spec = [controller_spec, disk_spec]
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask, gateway):
+ """Builds the machine id change config spec."""
+ machine_id_str = "%s;%s;%s;%s" % (mac, ip_addr, netmask, gateway)
+ virtual_machine_config_spec = \
+ client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ opt = client_factory.create('ns0:OptionValue')
+ opt.key = "machine.id"
+ opt.value = machine_id_str
+ virtual_machine_config_spec.extraConfig = [opt]
+ return virtual_machine_config_spec
+
+
+def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
+ port_group_name, vlan_id):
+ """Builds the virtual switch port group add spec."""
+ vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
+ vswitch_port_group_spec.name = port_group_name
+ vswitch_port_group_spec.vswitchName = vswitch_name
+
+ # VLAN ID of 0 means that VLAN tagging is not to be done for the network.
+ vswitch_port_group_spec.vlanId = int(vlan_id)
+
+ policy = client_factory.create('ns0:HostNetworkPolicy')
+ nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
+ nicteaming.notifySwitches = True
+ policy.nicTeaming = nicteaming
+
+ vswitch_port_group_spec.policy = policy
+ return vswitch_port_group_spec
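
build_datastore_path() and split_datastore_path() are pure string helpers and the easiest pieces above to sanity-check without an ESX connection. A short sketch, assuming this tree is importable as nova.virt.vmwareapi.vm_util:

from nova.virt.vmwareapi import vm_util

path = vm_util.build_datastore_path("datastore1", "instance-1/instance-1.vmdk")
assert path == "[datastore1] instance-1/instance-1.vmdk"

datastore, rel_path = vm_util.split_datastore_path(path)
assert (datastore, rel_path) == ("datastore1", "instance-1/instance-1.vmdk")

# The machine id passed to the guest by get_machine_id_change_spec() is a
# simple semicolon-separated string of mac;ip;netmask;gateway.
machine_id = "%s;%s;%s;%s" % ("02:16:3e:00:00:01", "10.0.0.2",
                              "255.255.255.0", "10.0.0.1")
assert machine_id == "02:16:3e:00:00:01;10.0.0.2;255.255.255.0;10.0.0.1"
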
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
new file mode 100644
index 000000000..cf6c88bbd
--- /dev/null
+++ b/nova/virt/vmwareapi/vmops.py
@@ -0,0 +1,789 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Class for VM tasks like spawn, snapshot, suspend, resume etc.
+"""
+
+import base64
+import os
+import time
+import urllib
+import urllib2
+import uuid
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.compute import power_state
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmware_images
+from nova.virt.vmwareapi import network_utils
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.vmwareapi.vmops")
+
+VMWARE_POWER_STATES = {
+ 'poweredOff': power_state.SHUTDOWN,
+ 'poweredOn': power_state.RUNNING,
+ 'suspended': power_state.PAUSED}
+
+
+class VMWareVMOps(object):
+ """Management class for VM-related tasks."""
+
+ def __init__(self, session):
+ """Initializer."""
+ self._session = session
+
+ def _wait_with_callback(self, instance_id, task, callback):
+ """Waits for the task to finish and does a callback after."""
+ ret = None
+ try:
+ ret = self._session._wait_for_task(instance_id, task)
+ except Exception, excep:
+ LOG.exception(excep)
+ callback(ret)
+
+ def list_instances(self):
+ """Lists the VM instances that are registered with the ESX host."""
+ LOG.debug(_("Getting list of instances"))
+ vms = self._session._call_method(vim_util, "get_objects",
+ "VirtualMachine",
+ ["name", "runtime.connectionState"])
+ lst_vm_names = []
+ for vm in vms:
+ vm_name = None
+ conn_state = None
+ for prop in vm.propSet:
+ if prop.name == "name":
+ vm_name = prop.val
+ elif prop.name == "runtime.connectionState":
+ conn_state = prop.val
+ # Ignore orphaned or inaccessible VMs
+ if conn_state not in ["orphaned", "inaccessible"]:
+ lst_vm_names.append(vm_name)
+ LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
+ return lst_vm_names
+
+ def spawn(self, instance):
+ """
+ Creates a VM instance.
+
+ Steps followed are:
+ 1. Create a VM with no disk and the specifics in the instance object
+ like RAM size.
+ 2. Create a dummy vmdk of the size of the disk file that is to be
+ uploaded. This is required just to create the metadata file.
+ 3. Delete the -flat.vmdk file created in the above step and retain
+ the metadata .vmdk file.
+ 4. Upload the disk file.
+ 5. Attach the disk to the VM by reconfiguring the same.
+ 6. Power on the VM.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref:
+ raise exception.Duplicate(_("Attempted to create a VM with the name"
+ " %s, but one already exists on the host") % instance.name)
+
+ client_factory = self._session._get_vim().client.factory
+ service_content = self._session._get_vim().get_service_content()
+
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+
+ net_name = network['bridge']
+
+ def _check_if_network_bridge_exists():
+ network_ref = \
+ network_utils.get_network_with_the_name(self._session,
+ net_name)
+ if network_ref is None:
+ raise exception.NotFound(_("Network with the name '%s' doesn't"
+ " exist on the ESX host") % net_name)
+
+ _check_if_network_bridge_exists()
+
+ def _get_datastore_ref():
+ """Get the datastore list and choose the first local storage."""
+ data_stores = self._session._call_method(vim_util, "get_objects",
+ "Datastore", ["summary.type", "summary.name"])
+ for elem in data_stores:
+ ds_name = None
+ ds_type = None
+ for prop in elem.propSet:
+ if prop.name == "summary.type":
+ ds_type = prop.val
+ elif prop.name == "summary.name":
+ ds_name = prop.val
+ # Local storage identifier
+ if ds_type == "VMFS":
+ data_store_name = ds_name
+ return data_store_name
+
+ # No local (VMFS) datastore was found in the loop above.
+ msg = _("Couldn't get a local Datastore reference")
+ LOG.exception(msg)
+ raise exception.Error(msg)
+
+ data_store_name = _get_datastore_ref()
+
+ def _get_image_properties():
+ """
+ Get the size of the flat vmdk file stored in the image
+ repository.
+ """
+ image_size, image_properties = \
+ vmware_images.get_vmdk_size_and_properties(
+ instance.image_id, instance)
+ vmdk_file_size_in_kb = int(image_size) / 1024
+ os_type = image_properties.get("vmware_ostype", "otherGuest")
+ adapter_type = image_properties.get("vmware_adaptertype",
+ "lsiLogic")
+ return vmdk_file_size_in_kb, os_type, adapter_type
+
+ vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()
+
+ def _get_vmfolder_and_res_pool_mors():
+ """Get the Vm folder ref from the datacenter."""
+ dc_objs = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["vmFolder"])
+ # There is only one default datacenter in a standalone ESX host
+ vm_folder_mor = dc_objs[0].propSet[0].val
+
+ # Get the resource pool. Taking the first resource pool coming our
+ # way. Assuming that is the default resource pool.
+ res_pool_mor = self._session._call_method(vim_util, "get_objects",
+ "ResourcePool")[0].obj
+ return vm_folder_mor, res_pool_mor
+
+ vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
+
+ # Get the create vm config spec
+ config_spec = vm_util.get_vm_create_spec(client_factory, instance,
+ data_store_name, net_name, os_type)
+
+ def _execute_create_vm():
+ """Create VM on ESX host."""
+ LOG.debug(_("Creating VM with the name %s on the ESX host") %
+ instance.name)
+ # Create the VM on the ESX host
+ vm_create_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateVM_Task", vm_folder_mor,
+ config=config_spec, pool=res_pool_mor)
+ self._session._wait_for_task(instance.id, vm_create_task)
+
+ LOG.debug(_("Created VM with the name %s on the ESX host") %
+ instance.name)
+
+ _execute_create_vm()
+
+ # Set the machine id for the VM for setting the IP
+ self._set_machine_id(client_factory, instance)
+
+ # Naming the VM files in correspondence with the VM instance name
+ # The flat vmdk file name
+ flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
+ instance.name)
+ # The vmdk meta-data file
+ uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
+ flat_uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ flat_uploaded_vmdk_name)
+ uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ uploaded_vmdk_name)
+
+ def _create_virtual_disk():
+ """Create a virtual disk of the size of flat vmdk file."""
+ # Create a Virtual Disk of the size of the flat vmdk file. This is
+ # done just to generate the meta-data file whose specifics
+ # depend on the size of the disk, thin/thick provisioning and the
+ # storage adapter type.
+ # Here we assume thick provisioning and lsiLogic for the adapter
+ # type
+ LOG.debug(_("Creating Virtual Disk of size "
+ "%(vmdk_file_size_in_kb)s KB and adapter type "
+ "%(adapter_type)s on the ESX host local store"
+ " %(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "adapter_type": adapter_type,
+ "data_store_name": data_store_name})
+ vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
+ vmdk_file_size_in_kb, adapter_type)
+ vmdk_create_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ name=uploaded_vmdk_path,
+ datacenter=self._get_datacenter_name_and_ref()[0],
+ spec=vmdk_create_spec)
+ self._session._wait_for_task(instance.id, vmdk_create_task)
+ LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
+ " KB on the ESX host local store "
+ "%(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "data_store_name": data_store_name})
+
+ _create_virtual_disk()
+
+ def _delete_disk_file():
+ LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
+ "on the ESX host local"
+ "store %(data_store_name)s") %
+ {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ "data_store_name": data_store_name})
+ # Delete the -flat.vmdk file created. .vmdk file is retained.
+ vmdk_delete_task = self._session._call_method(
+ self._session._get_vim(),
+ "DeleteDatastoreFile_Task",
+ service_content.fileManager,
+ name=flat_uploaded_vmdk_path)
+ self._session._wait_for_task(instance.id, vmdk_delete_task)
+ LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
+ "ESX host local store %(data_store_name)s") %
+ {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ "data_store_name": data_store_name})
+
+ _delete_disk_file()
+
+ cookies = self._session._get_vim().client.options.transport.cookiejar
+
+ def _fetch_image_on_esx_datastore():
+ """Fetch image from Glance to ESX datastore."""
+ LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
+ "data store %(data_store_name)s") %
+ ({'image_id': instance.image_id,
+ 'data_store_name': data_store_name}))
+ # Upload the -flat.vmdk file whose meta-data file we just created
+ # above
+ vmware_images.fetch_image(
+ instance.image_id,
+ instance,
+ host=self._session._host_ip,
+ data_center_name=self._get_datacenter_name_and_ref()[1],
+ datastore_name=data_store_name,
+ cookies=cookies,
+ file_path=flat_uploaded_vmdk_name)
+ LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
+ "data store %(data_store_name)s") %
+ ({'image_id': instance.image_id,
+ 'data_store_name': data_store_name}))
+ _fetch_image_on_esx_datastore()
+
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+
+ def _attach_vmdk_to_the_vm():
+ """
+ Attach the uploaded vmdk to the VM by reconfiguring the VM.
+ """
+ vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
+ client_factory,
+ vmdk_file_size_in_kb, uploaded_vmdk_path,
+ adapter_type)
+ LOG.debug(_("Reconfiguring VM instance %s to attach the image "
+ "disk") % instance.name)
+ reconfig_task = self._session._call_method(
+ self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vmdk_attach_config_spec)
+ self._session._wait_for_task(instance.id, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %s to attach the image "
+ "disk") % instance.name)
+
+ _attach_vmdk_to_the_vm()
+
+ def _power_on_vm():
+ """Power on the VM."""
+ LOG.debug(_("Powering on the VM instance %s") % instance.name)
+ # Power On the VM
+ power_on_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, power_on_task)
+ LOG.debug(_("Powered on the VM instance %s") % instance.name)
+ _power_on_vm()
+
+ def snapshot(self, instance, snapshot_name):
+ """
+ Create snapshot from a running VM instance.
+ Steps followed are:
+ 1. Get the name of the vmdk file which the VM points to right now.
+ Can be a chain of snapshots, so we need to know the last in the
+ chain.
+ 2. Create the snapshot. A new vmdk is created which the VM points to
+ now. The earlier vmdk becomes read-only.
+ 3. Call CopyVirtualDisk which coalesces the disk chain to form a single
+ vmdk; more precisely, a .vmdk metadata file and a -flat.vmdk disk
+ data file.
+ 4. Now upload the -flat.vmdk file to the image store.
+ 5. Delete the coalesced .vmdk and -flat.vmdk created.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ client_factory = self._session._get_vim().client.factory
+ service_content = self._session._get_vim().get_service_content()
+
+ def _get_vm_and_vmdk_attribs():
+ # Get the vmdk file name that the VM is pointing to
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ vmdk_file_path_before_snapshot, adapter_type = \
+ vm_util.get_vmdk_file_path_and_adapter_type(client_factory,
+ hardware_devices)
+ datastore_name = vm_util.split_datastore_path(
+ vmdk_file_path_before_snapshot)[0]
+ os_type = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "summary.config.guestId")
+ return (vmdk_file_path_before_snapshot, adapter_type,
+ datastore_name, os_type)
+
+ vmdk_file_path_before_snapshot, adapter_type, datastore_name,\
+ os_type = _get_vm_and_vmdk_attribs()
+
+ def _create_vm_snapshot():
+ # Create a snapshot of the VM
+ LOG.debug(_("Creating Snapshot of the VM instance %s ") %
+ instance.name)
+ snapshot_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateSnapshot_Task", vm_ref,
+ name="%s-snapshot" % instance.name,
+ description="Taking Snapshot of the VM",
+ memory=True,
+ quiesce=True)
+ self._session._wait_for_task(instance.id, snapshot_task)
+ LOG.debug(_("Created Snapshot of the VM instance %s ") %
+ instance.name)
+
+ _create_vm_snapshot()
+
+ def _check_if_tmp_folder_exists():
+ # Copy the contents of the VM that were there just before the
+ # snapshot was taken
+ ds_ref_ret = vim_util.get_dynamic_property(
+ self._session._get_vim(),
+ vm_ref,
+ "VirtualMachine",
+ "datastore")
+ if not ds_ref_ret:
+ raise exception.NotFound(_("Failed to get the datastore "
+ "reference(s) which the VM uses"))
+ ds_ref = ds_ref_ret.ManagedObjectReference[0]
+ ds_browser = vim_util.get_dynamic_property(
+ self._session._get_vim(),
+ ds_ref,
+ "Datastore",
+ "browser")
+ # Check if the vmware-tmp folder exists or not. If not, create one
+ tmp_folder_path = vm_util.build_datastore_path(datastore_name,
+ "vmware-tmp")
+ if not self._path_exists(ds_browser, tmp_folder_path):
+ self._mkdir(vm_util.build_datastore_path(datastore_name,
+ "vmware-tmp"))
+
+ _check_if_tmp_folder_exists()
+
+ # Generate a random vmdk file name to which the coalesced vmdk content
+ # will be copied to. A random name is chosen so that we don't have
+ # name clashes.
+ random_name = str(uuid.uuid4())
+ dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name,
+ "vmware-tmp/%s.vmdk" % random_name)
+ dc_ref = self._get_datacenter_name_and_ref()[0]
+
+ def _copy_vmdk_content():
+ # Copy the contents of the disk ( or disks, if there were snapshots
+ # done earlier) to a temporary vmdk file.
+ copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
+ adapter_type)
+ LOG.debug(_("Copying disk data before snapshot of the VM "
+ " instance %s") % instance.name)
+ copy_disk_task = self._session._call_method(
+ self._session._get_vim(),
+ "CopyVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ sourceName=vmdk_file_path_before_snapshot,
+ sourceDatacenter=dc_ref,
+ destName=dest_vmdk_file_location,
+ destDatacenter=dc_ref,
+ destSpec=copy_spec,
+ force=False)
+ self._session._wait_for_task(instance.id, copy_disk_task)
+ LOG.debug(_("Copied disk data before snapshot of the VM "
+ "instance %s") % instance.name)
+
+ _copy_vmdk_content()
+
+ cookies = self._session._get_vim().client.options.transport.cookiejar
+
+ def _upload_vmdk_to_image_repository():
+ # Upload the contents of -flat.vmdk file which has the disk data.
+ LOG.debug(_("Uploading image %s") % snapshot_name)
+ vmware_images.upload_image(
+ snapshot_name,
+ instance,
+ os_type=os_type,
+ adapter_type=adapter_type,
+ image_version=1,
+ host=self._session._host_ip,
+ data_center_name=self._get_datacenter_name_and_ref()[1],
+ datastore_name=datastore_name,
+ cookies=cookies,
+ file_path="vmware-tmp/%s-flat.vmdk" % random_name)
+ LOG.debug(_("Uploaded image %s") % snapshot_name)
+
+ _upload_vmdk_to_image_repository()
+
+ def _clean_temp_data():
+ """
+ Delete temporary vmdk files generated in image handling
+ operations.
+ """
+ # Delete the temporary vmdk created above.
+ LOG.debug(_("Deleting temporary vmdk file %s")
+ % dest_vmdk_file_location)
+ remove_disk_task = self._session._call_method(
+ self._session._get_vim(),
+ "DeleteVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ name=dest_vmdk_file_location,
+ datacenter=dc_ref)
+ self._session._wait_for_task(instance.id, remove_disk_task)
+ LOG.debug(_("Deleted temporary vmdk file %s")
+ % dest_vmdk_file_location)
+
+ _clean_temp_data()
+
+ def reboot(self, instance):
+ """Reboot a VM instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
+ "summary.guest.toolsRunningStatus"]
+ props = self._session._call_method(vim_util, "get_object_properties",
+ None, vm_ref, "VirtualMachine",
+ lst_properties)
+ pwr_state = None
+ tools_status = None
+ tools_running_status = False
+ for elem in props:
+ for prop in elem.propSet:
+ if prop.name == "runtime.powerState":
+ pwr_state = prop.val
+ elif prop.name == "summary.guest.toolsStatus":
+ tools_status = prop.val
+ elif prop.name == "summary.guest.toolsRunningStatus":
+ tools_running_status = prop.val
+
+ # Raise an exception if the VM is not powered On.
+ if pwr_state not in ["poweredOn"]:
+ raise exception.Invalid(_("instance - %s not poweredOn. So can't "
+ "be rebooted.") % instance.name)
+
+ # If the latest VMware tools are installed in the VM and they are
+ # running, do just a guest OS reboot; otherwise do a hard reset.
+ if (tools_status == "toolsOk" and
+ tools_running_status == "guestToolsRunning"):
+ LOG.debug(_("Rebooting guest OS of VM %s") % instance.name)
+ self._session._call_method(self._session._get_vim(), "RebootGuest",
+ vm_ref)
+ LOG.debug(_("Rebooted guest OS of VM %s") % instance.name)
+ else:
+ LOG.debug(_("Doing hard reboot of VM %s") % instance.name)
+ reset_task = self._session._call_method(self._session._get_vim(),
+ "ResetVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, reset_task)
+ LOG.debug(_("Did hard reboot of VM %s") % instance.name)
+
+ def destroy(self, instance):
+ """
+ Destroy a VM instance. Steps followed are:
+ 1. Power off the VM, if it is in poweredOn state.
+ 2. Un-register a VM.
+ 3. Delete the contents of the folder holding the VM related data.
+ """
+ try:
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ LOG.debug(_("instance - %s not present") % instance.name)
+ return
+ lst_properties = ["config.files.vmPathName", "runtime.powerState"]
+ props = self._session._call_method(vim_util,
+ "get_object_properties",
+ None, vm_ref, "VirtualMachine", lst_properties)
+ pwr_state = None
+ for elem in props:
+ vm_config_pathname = None
+ for prop in elem.propSet:
+ if prop.name == "runtime.powerState":
+ pwr_state = prop.val
+ elif prop.name == "config.files.vmPathName":
+ vm_config_pathname = prop.val
+ if vm_config_pathname:
+ datastore_name, vmx_file_path = \
+ vm_util.split_datastore_path(vm_config_pathname)
+ # Power off the VM if it is in PoweredOn state.
+ if pwr_state == "poweredOn":
+ LOG.debug(_("Powering off the VM %s") % instance.name)
+ poweroff_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOffVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, poweroff_task)
+ LOG.debug(_("Powered off the VM %s") % instance.name)
+
+ # Un-register the VM
+ try:
+ LOG.debug(_("Unregistering the VM %s") % instance.name)
+ self._session._call_method(self._session._get_vim(),
+ "UnregisterVM", vm_ref)
+ LOG.debug(_("Unregistered the VM %s") % instance.name)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
+ " while un-registering the VM: %s") % str(excep))
+
+ # Delete the folder holding the VM related content on
+ # the datastore.
+ try:
+ dir_ds_compliant_path = vm_util.build_datastore_path(
+ datastore_name,
+ os.path.dirname(vmx_file_path))
+ LOG.debug(_("Deleting contents of the VM %(name)s from "
+ "datastore %(datastore_name)s") %
+ ({'name': instance.name,
+ 'datastore_name': datastore_name}))
+ delete_task = self._session._call_method(
+ self._session._get_vim(),
+ "DeleteDatastoreFile_Task",
+ self._session._get_vim().get_service_content().fileManager,
+ name=dir_ds_compliant_path)
+ self._session._wait_for_task(instance.id, delete_task)
+ LOG.debug(_("Deleted contents of the VM %(name)s from "
+ "datastore %(datastore_name)s") %
+ ({'name': instance.name,
+ 'datastore_name': datastore_name}))
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:destroy, "
+ "got this exception while deleting"
+ " the VM contents from the disk: %s")
+ % str(excep))
+ except Exception, exc:
+ LOG.exception(exc)
+
+ def pause(self, instance, callback):
+ """Pause a VM instance."""
+ raise exception.APIError("pause not supported for vmwareapi")
+
+ def unpause(self, instance, callback):
+ """Un-Pause a VM instance."""
+ raise exception.APIError("unpause not supported for vmwareapi")
+
+ def suspend(self, instance, callback):
+ """Suspend the specified instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ # Only PoweredOn VMs can be suspended.
+ if pwr_state == "poweredOn":
+ LOG.debug(_("Suspending the VM %s ") % instance.name)
+ suspend_task = self._session._call_method(self._session._get_vim(),
+ "SuspendVM_Task", vm_ref)
+ self._wait_with_callback(instance.id, suspend_task, callback)
+ LOG.debug(_("Suspended the VM %s ") % instance.name)
+ # Raise Exception if VM is poweredOff
+ elif pwr_state == "poweredOff":
+ raise exception.Invalid(_("instance - %s is poweredOff and hence "
+ " can't be suspended.") % instance.name)
+ LOG.debug(_("VM %s was already in suspended state. So returning "
+ "without doing anything") % instance.name)
+
+ def resume(self, instance, callback):
+ """Resume the specified instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ if pwr_state.lower() == "suspended":
+ LOG.debug(_("Resuming the VM %s") % instance.name)
+ suspend_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._wait_with_callback(instance.id, suspend_task, callback)
+ LOG.debug(_("Resumed the VM %s ") % instance.name)
+ else:
+ raise exception.Invalid(_("instance - %s not in Suspended state "
+ "and hence can't be Resumed.") % instance.name)
+
+ def get_info(self, instance_name):
+ """Return data about the VM instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance_name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance_name)
+
+ lst_properties = ["summary.config.numCpu",
+ "summary.config.memorySizeMB",
+ "runtime.powerState"]
+ vm_props = self._session._call_method(vim_util,
+ "get_object_properties", None, vm_ref, "VirtualMachine",
+ lst_properties)
+ max_mem = None
+ pwr_state = None
+ num_cpu = None
+ for elem in vm_props:
+ for prop in elem.propSet:
+ if prop.name == "summary.config.numCpu":
+ num_cpu = int(prop.val)
+ elif prop.name == "summary.config.memorySizeMB":
+ # In MB, but we want in KB
+ max_mem = int(prop.val) * 1024
+ elif prop.name == "runtime.powerState":
+ pwr_state = VMWARE_POWER_STATES[prop.val]
+
+ return {'state': pwr_state,
+ 'max_mem': max_mem,
+ 'mem': max_mem,
+ 'num_cpu': num_cpu,
+ 'cpu_time': 0}
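+
+ # Added commentary (not part of the original patch): the dict above is the
+ # usual virt-driver get_info shape -- 'state' is a nova power_state value
+ # (mapped via VMWARE_POWER_STATES) and the memory figures are in KB.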
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ raise exception.APIError("get_diagnostics not implemented for "
+ "vmwareapi")
+
+ def get_console_output(self, instance):
+ """Return snapshot of console."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ param_list = {"id": str(vm_ref)}
+ base_url = "%s://%s/screen?%s" % (self._session._scheme,
+ self._session._host_ip,
+ urllib.urlencode(param_list))
+ request = urllib2.Request(base_url)
+ base64string = base64.encodestring(
+ '%s:%s' % (
+ self._session._host_username,
+ self._session._host_password)).replace('\n', '')
+ request.add_header("Authorization", "Basic %s" % base64string)
+ result = urllib2.urlopen(request)
+ if result.code == 200:
+ return result.read()
+ else:
+ return ""
+
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console."""
+ return 'http://fakeajaxconsole/fake_url'
+
+ def _set_machine_id(self, client_factory, instance):
+ """
+ Set the machine id of the VM for guest tools to pick up and change
+ the IP.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ mac_addr = instance.mac_address
+ net_mask = network["netmask"]
+ gateway = network["gateway"]
+ ip_addr = db.instance_get_fixed_address(context.get_admin_context(),
+ instance['id'])
+ machine_id_change_spec = \
+ vm_util.get_machine_id_change_spec(client_factory, mac_addr,
+ ip_addr, net_mask, gateway)
+ LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
+ "with ip - %(ip_addr)s") %
+ ({'name': instance.name,
+ 'ip_addr': ip_addr}))
+ reconfig_task = self._session._call_method(self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=machine_id_change_spec)
+ self._session._wait_for_task(instance.id, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %(name)s to set the machine id "
+ "with ip - %(ip_addr)s") %
+ ({'name': instance.name,
+ 'ip_addr': ip_addr}))
+
+ def _get_datacenter_name_and_ref(self):
+ """Get the datacenter name and the reference."""
+ dc_obj = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["name"])
+ return dc_obj[0].obj, dc_obj[0].propSet[0].val
+
+ def _path_exists(self, ds_browser, ds_path):
+ """Check if the path exists on the datastore."""
+ search_task = self._session._call_method(self._session._get_vim(),
+ "SearchDatastore_Task",
+ ds_browser,
+ datastorePath=ds_path)
+ # Wait till the state changes from queued or running.
+ # If an error state is returned, it means that the path doesn't exist.
+ while True:
+ task_info = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ search_task, "Task", "info")
+ if task_info.state in ['queued', 'running']:
+ time.sleep(2)
+ continue
+ break
+ if task_info.state == "error":
+ return False
+ return True
+
+ def _mkdir(self, ds_path):
+ """
+ Creates a directory at the path specified. If it is just "NAME",
+ then a directory with this name is created at the topmost level of the
+ DataStore.
+ """
+ LOG.debug(_("Creating directory with path %s") % ds_path)
+ self._session._call_method(self._session._get_vim(), "MakeDirectory",
+ self._session._get_vim().get_service_content().fileManager,
+ name=ds_path, createParentDirectories=False)
+ LOG.debug(_("Created directory with path %s") % ds_path)
+
+ def _get_vm_ref_from_the_name(self, vm_name):
+ """Get reference to the VM with the name specified."""
+ vms = self._session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ for vm in vms:
+ if vm.propSet[0].val == vm_name:
+ return vm.obj
+ return None
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
new file mode 100644
index 000000000..50c6baedf
--- /dev/null
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -0,0 +1,201 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Utility functions for Image transfer.
+"""
+
+from glance import client
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.virt.vmwareapi import io_util
+from nova.virt.vmwareapi import read_write_util
+
+LOG = logging.getLogger("nova.virt.vmwareapi.vmware_images")
+
+FLAGS = flags.FLAGS
+
+QUEUE_BUFFER_SIZE = 10
+
+
+def start_transfer(read_file_handle, data_size, write_file_handle=None,
+ glance_client=None, image_id=None, image_meta={}):
+ """Start the data transfer from the reader to the writer.
+ Reader writes to the pipe and the writer reads from the pipe. This means
+ that the total transfer time boils down to the slower of the read/write
+ and not the addition of the two times."""
+ # The pipe that acts as an intermediate store of data for reader to write
+ # to and writer to grab from.
+ thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
+ # The read thread. In case of glance it is the instance of the
+ # GlanceFileRead class. The glance client read returns an iterator
+ # and this class wraps that iterator to provide datachunks in calls
+ # to read.
+ read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
+
+ # In case of Glance - VMWare transfer, we just need a handle to the
+ # HTTP Connection that is to send transfer data to the VMWare datastore.
+ if write_file_handle:
+ write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
+ # In case of VMWare - Glance transfer, we relinquish the VMWare HTTP file
+ # read handle to the Glance Client instance, but to be sure the transfer
+ # completed we need to wait for the status of the image on Glance to
+ # change to active. The GlanceWriteThread handles that for us.
+ elif glance_client and image_id:
+ write_thread = io_util.GlanceWriteThread(thread_safe_pipe,
+ glance_client, image_id, image_meta)
+ # Start the read and write threads.
+ read_event = read_thread.start()
+ write_event = write_thread.start()
+ try:
+ # Wait on the read and write events to signal their end
+ read_event.wait()
+ write_event.wait()
+ except Exception, exc:
+ # In case any of the reads or writes raises an exception, stop the
+ # threads so that we don't unnecessarily keep the other one waiting.
+ read_thread.stop()
+ write_thread.stop()
+
+ # Log and raise the exception.
+ LOG.exception(exc)
+ raise exception.Error(exc)
+ finally:
+ # No matter what, try closing the read and write handles, if it so
+ # applies.
+ read_file_handle.close()
+ if write_file_handle:
+ write_file_handle.close()
+
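+# Added commentary (illustrative, not part of the original patch): the data
+# flow set up by start_transfer() is
+#
+#   source handle --IOThread--> ThreadSafePipe --IOThread/GlanceWriteThread--> destination
+#
+# e.g. a Glance -> VMware download pairs a GlanceFileRead reader with a
+# VMWareHTTPWriteFile writer, as _get_glance_image() below does.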
+
+def fetch_image(image, instance, **kwargs):
+ """Fetch an image for attaching to the newly created VM."""
+ # Depending upon the image service, make appropriate image service call
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ func = _get_glance_image
+ elif FLAGS.image_service == "nova.image.s3.S3ImageService":
+ func = _get_s3_image
+ elif FLAGS.image_service == "nova.image.local.LocalImageService":
+ func = _get_local_image
+ else:
+ raise NotImplementedError(_("The Image Service %s is not implemented")
+ % FLAGS.image_service)
+ return func(image, instance, **kwargs)
+
+
+def upload_image(image, instance, **kwargs):
+ """Upload the newly snapshotted VM disk file."""
+ # Depending upon the image service, make appropriate image service call
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ func = _put_glance_image
+ elif FLAGS.image_service == "nova.image.s3.S3ImageService":
+ func = _put_s3_image
+ elif FLAGS.image_service == "nova.image.local.LocalImageService":
+ func = _put_local_image
+ else:
+ raise NotImplementedError(_("The Image Service %s is not implemented")
+ % FLAGS.image_service)
+ return func(image, instance, **kwargs)
+
+
+def _get_glance_image(image, instance, **kwargs):
+ """Download image from the glance image server."""
+ LOG.debug(_("Downloading image %s from glance image server") % image)
+ glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ metadata, read_iter = glance_client.get_image(image)
+ read_file_handle = read_write_util.GlanceFileRead(read_iter)
+ file_size = int(metadata['size'])
+ write_file_handle = read_write_util.VMWareHTTPWriteFile(
+ kwargs.get("host"),
+ kwargs.get("data_center_name"),
+ kwargs.get("datastore_name"),
+ kwargs.get("cookies"),
+ kwargs.get("file_path"),
+ file_size)
+ start_transfer(read_file_handle, file_size,
+ write_file_handle=write_file_handle)
+ LOG.debug(_("Downloaded image %s from glance image server") % image)
+
+
+def _get_s3_image(image, instance, **kwargs):
+ """Download image from the S3 image server."""
+ raise NotImplementedError
+
+
+def _get_local_image(image, instance, **kwargs):
+ """Download image from the local nova compute node."""
+ raise NotImplementedError
+
+
+def _put_glance_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to Glance image server."""
+ LOG.debug(_("Uploading image %s to the Glance image server") % image)
+ read_file_handle = read_write_util.VmWareHTTPReadFile(
+ kwargs.get("host"),
+ kwargs.get("data_center_name"),
+ kwargs.get("datastore_name"),
+ kwargs.get("cookies"),
+ kwargs.get("file_path"))
+ file_size = read_file_handle.get_size()
+ glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ # The properties and other fields that we need to set for the image.
+ image_metadata = {"is_public": True,
+ "disk_format": "vmdk",
+ "container_format": "bare",
+ "type": "vmdk",
+ "properties": {"vmware_adaptertype":
+ kwargs.get("adapter_type"),
+ "vmware_ostype": kwargs.get("os_type"),
+ "vmware_image_version":
+ kwargs.get("image_version")}}
+ start_transfer(read_file_handle, file_size, glance_client=glance_client,
+ image_id=image, image_meta=image_metadata)
+ LOG.debug(_("Uploaded image %s to the Glance image server") % image)
+
+
+def _put_local_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to the local nova compute node."""
+ raise NotImplementedError
+
+
+def _put_s3_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to S3 image server."""
+ raise NotImplementedError
+
+
+def get_vmdk_size_and_properties(image, instance):
+ """
+ Get size of the vmdk file that is to be downloaded for attach in spawn.
+ Need this to create the dummy virtual disk for the meta-data file. The
+ geometry of the disk created depends on the size.
+ """
+
+ LOG.debug(_("Getting image size for the image %s") % image)
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ glance_client = client.Client(FLAGS.glance_host,
+ FLAGS.glance_port)
+ meta_data = glance_client.get_image_meta(image)
+ size, properties = meta_data["size"], meta_data["properties"]
+ elif FLAGS.image_service == "nova.image.s3.S3ImageService":
+ raise NotImplementedError
+ elif FLAGS.image_service == "nova.image.local.LocalImageService":
+ raise NotImplementedError
+ LOG.debug(_("Got image size of %(size)s for the image %(image)s") %
+ locals())
+ return size, properties
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
new file mode 100644
index 000000000..20c1b2b45
--- /dev/null
+++ b/nova/virt/vmwareapi_conn.py
@@ -0,0 +1,376 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to the VMware ESX platform.
+
+**Related Flags**
+
+:vmwareapi_host_ip: IP address of the VMware ESX server.
+:vmwareapi_host_username: Username for connection to VMware ESX Server.
+:vmwareapi_host_password: Password for connection to VMware ESX Server.
+:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks
+ (default: 5.0).
+:vmwareapi_api_retry_count: The API retry count in case of failure such as
+ network failures (socket errors etc.)
+ (default: 10).
+
+"""
+
+import time
+
+from eventlet import event
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import vim
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi.vmops import VMWareVMOps
+
+
+LOG = logging.getLogger("nova.virt.vmwareapi_conn")
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vmwareapi_host_ip',
+ None,
+ 'URL for connection to VMWare ESX host. '
+ 'Required if connection_type is vmwareapi.')
+flags.DEFINE_string('vmwareapi_host_username',
+ None,
+ 'Username for connection to VMWare ESX host. '
+ 'Used only if connection_type is vmwareapi.')
+flags.DEFINE_string('vmwareapi_host_password',
+ None,
+ 'Password for connection to VMWare ESX host. '
+ 'Used only if connection_type is vmwareapi.')
+flags.DEFINE_float('vmwareapi_task_poll_interval',
+ 5.0,
+ 'The interval used for polling of remote tasks. '
+ 'Used only if connection_type is vmwareapi')
+flags.DEFINE_float('vmwareapi_api_retry_count',
+ 10,
+ 'The number of times we retry on failures, '
+ 'e.g., socket error, etc. '
+ 'Used only if connection_type is vmwareapi')
+flags.DEFINE_string('vmwareapi_vlan_interface',
+ 'vmnic0',
+ 'Physical ethernet adapter name for vlan networking')
+
+TIME_BETWEEN_API_CALL_RETRIES = 2.0
+
+
+class Failure(Exception):
+ """Base Exception class for handling task failures."""
+
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ return str(self.details)
+
+
+def get_connection(_):
+ """Sets up the ESX host connection."""
+ host_ip = FLAGS.vmwareapi_host_ip
+ host_username = FLAGS.vmwareapi_host_username
+ host_password = FLAGS.vmwareapi_host_password
+ api_retry_count = FLAGS.vmwareapi_api_retry_count
+ if not host_ip or host_username is None or host_password is None:
+ raise Exception(_("Must specify vmwareapi_host_ip,"
+ "vmwareapi_host_username "
+ "and vmwareapi_host_password to use"
+ "connection_type=vmwareapi"))
+ return VMWareESXConnection(host_ip, host_username, host_password,
+ api_retry_count)
+
+
+class VMWareESXConnection(object):
+ """The ESX host connection object."""
+
+ def __init__(self, host_ip, host_username, host_password,
+ api_retry_count, scheme="https"):
+ session = VMWareAPISession(host_ip, host_username, host_password,
+ api_retry_count, scheme=scheme)
+ self._vmops = VMWareVMOps(session)
+
+ def init_host(self, host):
+ """Do the initialization that needs to be done."""
+ # FIXME(sateesh): implement this
+ pass
+
+ def list_instances(self):
+ """List VM instances."""
+ return self._vmops.list_instances()
+
+ def spawn(self, instance):
+ """Create VM instance."""
+ self._vmops.spawn(instance)
+
+ def snapshot(self, instance, name):
+ """Create snapshot from a running VM instance."""
+ self._vmops.snapshot(instance, name)
+
+ def reboot(self, instance):
+ """Reboot VM instance."""
+ self._vmops.reboot(instance)
+
+ def destroy(self, instance):
+ """Destroy VM instance."""
+ self._vmops.destroy(instance)
+
+ def pause(self, instance, callback):
+ """Pause VM instance."""
+ self._vmops.pause(instance, callback)
+
+ def unpause(self, instance, callback):
+ """Unpause paused VM instance."""
+ self._vmops.unpause(instance, callback)
+
+ def suspend(self, instance, callback):
+ """Suspend the specified instance."""
+ self._vmops.suspend(instance, callback)
+
+ def resume(self, instance, callback):
+ """Resume the suspended VM instance."""
+ self._vmops.resume(instance, callback)
+
+ def get_info(self, instance_id):
+ """Return info about the VM instance."""
+ return self._vmops.get_info(instance_id)
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ return self._vmops.get_diagnostics(instance)
+
+ def get_console_output(self, instance):
+ """Return snapshot of console."""
+ return self._vmops.get_console_output(instance)
+
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console."""
+ return self._vmops.get_ajax_console(instance)
+
+ def attach_volume(self, instance_name, device_path, mountpoint):
+ """Attach volume storage to VM instance."""
+ pass
+
+ def detach_volume(self, instance_name, mountpoint):
+ """Detach volume storage to VM instance."""
+ pass
+
+ def get_console_pool_info(self, console_type):
+ """Get info about the host on which the VM resides."""
+ return {'address': FLAGS.vmwareapi_host_ip,
+ 'username': FLAGS.vmwareapi_host_username,
+ 'password': FLAGS.vmwareapi_host_password}
+
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
+
+
+class VMWareAPISession(object):
+ """
+ Sets up a session with the ESX host and handles all
+ the calls made to the host.
+ """
+
+ def __init__(self, host_ip, host_username, host_password,
+ api_retry_count, scheme="https"):
+ self._host_ip = host_ip
+ self._host_username = host_username
+ self._host_password = host_password
+ self.api_retry_count = api_retry_count
+ self._scheme = scheme
+ self._session_id = None
+ self.vim = None
+ self._create_session()
+
+ def _get_vim_object(self):
+ """Create the VIM Object instance."""
+ return vim.Vim(protocol=self._scheme, host=self._host_ip)
+
+ def _create_session(self):
+ """Creates a session with the ESX host."""
+ while True:
+ try:
+ # Login and setup the session with the ESX host for making
+ # API calls
+ self.vim = self._get_vim_object()
+ session = self.vim.Login(
+ self.vim.get_service_content().sessionManager,
+ userName=self._host_username,
+ password=self._host_password)
+ # Terminate the earlier session, if possible (for the sake of
+ # preserving sessions, as there is a limit to the number of
+ # sessions we can have).
+ if self._session_id:
+ try:
+ self.vim.TerminateSession(
+ self.vim.get_service_content().sessionManager,
+ sessionId=[self._session_id])
+ except Exception, excep:
+ # This exception is something we can live with. It is
+ # just an extra caution on our side. The session may
+ # have been cleared. We could have made a call to
+ # SessionIsActive, but that is an overhead because we
+ # anyway would have to call TerminateSession.
+ LOG.debug(excep)
+ self._session_id = session.key
+ return
+ except Exception, excep:
+ LOG.critical(_("In vmwareapi:_create_session, "
+ "got this exception: %s") % excep)
+ raise exception.Error(excep)
+
+ def __del__(self):
+ """Logs-out the session."""
+ # Logout to avoid un-necessary increase in session count at the
+ # ESX host
+ try:
+ self.vim.Logout(self.vim.get_service_content().sessionManager)
+ except Exception, excep:
+ # The logout in __del__ is just a precaution to ensure that the
+ # session is not left active.
+ LOG.debug(excep)
+
+ def _is_vim_object(self, module):
+ """Check if the module is a VIM Object instance."""
+ return isinstance(module, vim.Vim)
+
+ def _call_method(self, module, method, *args, **kwargs):
+ """
+ Calls a method within the module specified with
+ args provided.
+ """
+ args = list(args)
+ retry_count = 0
+ exc = None
+ last_fault_list = []
+ while True:
+ try:
+ if not self._is_vim_object(module):
+ # If it is not the first try, then get the latest
+ # vim object
+ if retry_count > 0:
+ args = args[1:]
+ args = [self.vim] + args
+ retry_count += 1
+ temp_module = module
+
+ for method_elem in method.split("."):
+ temp_module = getattr(temp_module, method_elem)
+
+ return temp_module(*args, **kwargs)
+ except error_util.VimFaultException, excep:
+ # If it is a Session Fault Exception, it may point to a session
+ # gone bad. So we try re-creating the session and then retry
+ # the call.
+ exc = excep
+ if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
+ # An idle session returns an empty
+ # RetrievePropertiesResponse, but so does a legitimate empty
+ # result (e.g. a query for VMs on a host that has none), so we
+ # have no way to differentiate between the two.
+ # If the previous response was also empty and, after creating
+ # a new session, we still get an empty response, then we can
+ # be sure the response is genuinely meant to be empty.
+ if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
+ return []
+ last_fault_list = excep.fault_list
+ self._create_session()
+ else:
+ # No retrying for errors where the API call went through
+ # but failed due to the caller's fault. The caller should
+ # handle these errors, e.g. an InvalidArgument fault.
+ break
+ except error_util.SessionOverLoadException, excep:
+ # For exceptions which may come because of session overload,
+ # we retry
+ exc = excep
+ except Exception, excep:
+ # For any other exception, say improper data furnished in the
+ # SOAP call or the retry limit having been exceeded, we raise
+ # the exception.
+ exc = excep
+ break
+ # If retry count has been reached then break and
+ # raise the exception
+ if retry_count > self.api_retry_count:
+ break
+ time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
+
+ LOG.critical(_("In vmwareapi:_call_method, "
+ "got this exception: %s") % exc)
+ raise
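+
+ # Added commentary (usage shapes taken from elsewhere in this patch):
+ #   session._call_method(vim_util, "get_objects",
+ #                        "VirtualMachine", ["name"])
+ #   session._call_method(session._get_vim(), "PowerOffVM_Task", vm_ref)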
+
+ def _get_vim(self):
+ """Gets the VIM object reference."""
+ if self.vim is None:
+ self._create_session()
+ return self.vim
+
+ def _wait_for_task(self, instance_id, task_ref):
+ """
+ Return a Deferred that will give the result of the given task.
+ The task is polled until it completes.
+ """
+ done = event.Event()
+ loop = utils.LoopingCall(self._poll_task, instance_id, task_ref,
+ done)
+ loop.start(FLAGS.vmwareapi_task_poll_interval, now=True)
+ ret_val = done.wait()
+ loop.stop()
+ return ret_val
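+
+ # Added commentary: the LoopingCall above runs _poll_task() every
+ # FLAGS.vmwareapi_task_poll_interval seconds; _poll_task signals `done`
+ # once the task leaves the queued/running states, which unblocks
+ # done.wait().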
+
+ def _poll_task(self, instance_id, task_ref, done):
+ """
+ Poll the given task, and fires the given Deferred if we
+ get a result.
+ """
+ try:
+ task_info = self._call_method(vim_util, "get_dynamic_property",
+ task_ref, "Task", "info")
+ task_name = task_info.name
+ action = dict(
+ instance_id=int(instance_id),
+ action=task_name[0:255],
+ error=None)
+ if task_info.state in ['queued', 'running']:
+ return
+ elif task_info.state == 'success':
+ LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
+ "status: success") % locals())
+ done.send("success")
+ else:
+ error_info = str(task_info.error.localizedMessage)
+ action["error"] = error_info
+ LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
+ "status: error %(error_info)s") % locals())
+ done.send_exception(exception.Error(error_info))
+ db.instance_action_create(context.get_admin_context(), action)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
+ done.send_exception(excep)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 018d0dcd3..4434dbf0b 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -60,8 +60,8 @@ from nova import exception
from nova import log as logging
-_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
- 'PBD', 'VDI', 'VIF', 'VM', 'task']
+_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',
+ 'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
@@ -78,30 +78,36 @@ def reset():
for c in _CLASSES:
_db_content[c] = {}
create_host('fake')
- create_vm('fake', 'Running', is_a_template=False, is_control_domain=True)
+ create_vm('fake',
+ 'Running',
+ is_a_template=False,
+ is_control_domain=True)
+
+
+def reset_table(table):
+ if not table in _CLASSES:
+ return
+ _db_content[table] = {}
def create_host(name_label):
- return _create_object('host', {
- 'name_label': name_label,
- })
+ return _create_object('host',
+ {'name_label': name_label})
def create_network(name_label, bridge):
- return _create_object('network', {
- 'name_label': name_label,
- 'bridge': bridge,
- })
+ return _create_object('network',
+ {'name_label': name_label,
+ 'bridge': bridge})
def create_vm(name_label, status,
is_a_template=False, is_control_domain=False):
- return _create_object('VM', {
- 'name_label': name_label,
- 'power-state': status,
- 'is_a_template': is_a_template,
- 'is_control_domain': is_control_domain,
- })
+ return _create_object('VM',
+ {'name_label': name_label,
+ 'power-state': status,
+ 'is_a_template': is_a_template,
+ 'is_control_domain': is_control_domain})
def destroy_vm(vm_ref):
@@ -123,27 +129,24 @@ def destroy_vdi(vdi_ref):
def create_vdi(name_label, read_only, sr_ref, sharable):
- return _create_object('VDI', {
- 'name_label': name_label,
- 'read_only': read_only,
- 'SR': sr_ref,
- 'type': '',
- 'name_description': '',
- 'sharable': sharable,
- 'other_config': {},
- 'location': '',
- 'xenstore_data': '',
- 'sm_config': {},
- 'VBDs': {},
- })
+ return _create_object('VDI',
+ {'name_label': name_label,
+ 'read_only': read_only,
+ 'SR': sr_ref,
+ 'type': '',
+ 'name_description': '',
+ 'sharable': sharable,
+ 'other_config': {},
+ 'location': '',
+ 'xenstore_data': '',
+ 'sm_config': {},
+ 'VBDs': {}})
def create_vbd(vm_ref, vdi_ref):
- vbd_rec = {
- 'VM': vm_ref,
- 'VDI': vdi_ref,
- 'currently_attached': False,
- }
+ vbd_rec = {'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'currently_attached': False}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
@@ -162,20 +165,31 @@ def after_VBD_create(vbd_ref, vbd_rec):
vbd_rec['vm_name_label'] = vm_name_label
+def after_VM_create(vm_ref, vm_rec):
+ """Create read-only fields in the VM record."""
+ if 'is_control_domain' not in vm_rec:
+ vm_rec['is_control_domain'] = False
+
+
def create_pbd(config, host_ref, sr_ref, attached):
- return _create_object('PBD', {
- 'device-config': config,
- 'host': host_ref,
- 'SR': sr_ref,
- 'currently-attached': attached,
- })
+ return _create_object('PBD',
+ {'device-config': config,
+ 'host': host_ref,
+ 'SR': sr_ref,
+ 'currently-attached': attached})
def create_task(name_label):
- return _create_object('task', {
- 'name_label': name_label,
- 'status': 'pending',
- })
+ return _create_object('task',
+ {'name_label': name_label,
+ 'status': 'pending'})
+
+
+def create_local_pifs():
+ """Adds a PIF for each to the local database with VLAN=-1.
+ Do this one per host."""
+ for host_ref in _db_content['host'].keys():
+ _create_local_pif(host_ref)
def create_local_srs():
@@ -186,25 +200,34 @@ def create_local_srs():
def _create_local_sr(host_ref):
- sr_ref = _create_object('SR', {
- 'name_label': 'Local storage',
- 'type': 'lvm',
- 'content_type': 'user',
- 'shared': False,
- 'physical_size': str(1 << 30),
- 'physical_utilisation': str(0),
- 'virtual_allocation': str(0),
- 'other_config': {
- 'i18n-original-value-name_label': 'Local storage',
- 'i18n-key': 'local-storage',
- },
- 'VDIs': []
- })
+ sr_ref = _create_object(
+ 'SR',
+ {'name_label': 'Local storage',
+ 'type': 'lvm',
+ 'content_type': 'user',
+ 'shared': False,
+ 'physical_size': str(1 << 30),
+ 'physical_utilisation': str(0),
+ 'virtual_allocation': str(0),
+ 'other_config': {
+ 'i18n-original-value-name_label': 'Local storage',
+ 'i18n-key': 'local-storage'},
+ 'VDIs': []})
pbd_ref = create_pbd('', host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
+def _create_local_pif(host_ref):
+ pif_ref = _create_object('PIF',
+ {'name-label': 'Fake PIF',
+ 'MAC': '00:11:22:33:44:55',
+ 'physical': True,
+ 'VLAN': -1,
+ 'device': 'fake0',
+ 'host_uuid': host_ref})
+
+
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
@@ -228,6 +251,21 @@ def _create_sr(table, obj):
return sr_ref
+def _create_vlan(pif_ref, vlan_num, network_ref):
+ pif_rec = get_record('PIF', pif_ref)
+ vlan_pif_ref = _create_object('PIF',
+ {'name-label': 'Fake VLAN PIF',
+ 'MAC': '00:11:22:33:44:55',
+ 'physical': True,
+ 'VLAN': vlan_num,
+ 'device': pif_rec['device'],
+ 'host_uuid': pif_rec['host_uuid']})
+ return _create_object('VLAN',
+ {'tagged-pif': pif_ref,
+ 'untagged-pif': vlan_pif_ref,
+ 'tag': vlan_num})
+
+
def get_all(table):
return _db_content[table].keys()
@@ -286,10 +324,39 @@ class SessionBase(object):
rec['currently_attached'] = False
rec['device'] = ''
+ def PIF_get_all_records_where(self, _1, _2):
+ # TODO (salvatore-orlando): filter table on _2
+ return _db_content['PIF']
+
+ def VM_get_xenstore_data(self, _1, vm_ref):
+ return _db_content['VM'][vm_ref].get('xenstore_data', '')
+
+ def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
+ db_ref = _db_content['VM'][vm_ref]
+ if not 'xenstore_data' in db_ref:
+ return
+ db_ref['xenstore_data'][key] = None
+
+ def network_get_all_records_where(self, _1, _2):
+ # TODO (salvatore-orlando): filter table on _2
+ return _db_content['network']
+
+ def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
+ db_ref = _db_content['VM'][vm_ref]
+ if not 'xenstore_data' in db_ref:
+ db_ref['xenstore_data'] = {}
+ db_ref['xenstore_data'][key] = value
+
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
+ def host_call_plugin(*args):
+ return 'herp'
+
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
@@ -309,10 +376,9 @@ class SessionBase(object):
def _login(self, method, params):
self._session = str(uuid.uuid4())
- _db_content['session'][self._session] = {
- 'uuid': str(uuid.uuid4()),
- 'this_host': _db_content['host'].keys()[0],
- }
+ _db_content['session'][self._session] = \
+ {'uuid': str(uuid.uuid4()),
+ 'this_host': _db_content['host'].keys()[0]}
def _logout(self):
s = self._session
@@ -373,7 +439,6 @@ class SessionBase(object):
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
-
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
@@ -396,12 +461,13 @@ class SessionBase(object):
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
+ if (ref in _db_content[cls]):
+ if (field in _db_content[cls][ref]):
+ return _db_content[cls][ref][field]
+ else:
+ raise Failure(['HANDLE_INVALID', cls, ref])
- if (ref in _db_content[cls] and
- field in _db_content[cls][ref]):
- return _db_content[cls][ref][field]
-
- LOG.debuug(_('Raising NotImplemented'))
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
@@ -428,12 +494,16 @@ class SessionBase(object):
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
+ is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
- expected = is_sr_create and 10 or 2
+ expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = is_sr_create and \
- _create_sr(cls, params) or _create_object(cls, params[1])
+ _create_sr(cls, params) or \
+ is_vlan_create and \
+ _create_vlan(params[1], params[2], params[3]) or \
+ _create_object(cls, params[1])
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
@@ -473,7 +543,7 @@ class SessionBase(object):
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
- raise Failure(['HANDLE_INVALID', 'session', self._session])
+ raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
index c0406d8f0..94d8e5199 100644
--- a/nova/virt/xenapi/network_utils.py
+++ b/nova/virt/xenapi/network_utils.py
@@ -28,11 +28,26 @@ class NetworkHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
+ @classmethod
+ def find_network_with_name_label(cls, session, name_label):
+ networks = session.call_xenapi('network.get_by_name_label', name_label)
+ if len(networks) == 1:
+ return networks[0]
+ elif len(networks) > 1:
+ raise Exception(_('Found non-unique network'
+ ' for name_label %s') % name_label)
+ else:
+ return None
@classmethod
def find_network_with_bridge(cls, session, bridge):
- """Return the network on which the bridge is attached, if found."""
- expr = 'field "bridge" = "%s"' % bridge
+ """
+ Return the network on which the bridge is attached, if found.
+ The bridge is defined in the nova db and can be found either in the
+ 'bridge' or 'name_label' fields of the XenAPI network record.
+ """
+ expr = 'field "name__label" = "%s" or ' \
+ 'field "bridge" = "%s"' % (bridge, bridge)
networks = session.call_xenapi('network.get_all_records_where', expr)
if len(networks) == 1:
return networks.keys()[0]
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 80cc3035d..d07d60800 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -22,12 +22,16 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
import os
import pickle
import re
+import tempfile
import time
import urllib
+import uuid
from xml.dom import minidom
from eventlet import event
import glance.client
+from nova import context
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -35,14 +39,17 @@ from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
+from nova.virt import disk
from nova.virt import images
from nova.virt.xenapi import HelperBase
from nova.virt.xenapi.volume_utils import StorageError
-FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
+FLAGS = flags.FLAGS
+flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')
+
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
@@ -63,11 +70,14 @@ class ImageType:
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
+ 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
+ DISK_VHD = 3
class VMHelper(HelperBase):
@@ -76,62 +86,81 @@ class VMHelper(HelperBase):
"""
@classmethod
- def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
+ def create_vm(cls, session, instance, kernel, ramdisk,
+ use_pv_kernel=False):
"""Create a VM record. Returns a Deferred that gives the new
VM reference.
- the pv_kernel flag indicates whether the guest is HVM or PV
+ the use_pv_kernel flag indicates whether the guest is HVM or PV
+
+ There are 3 scenarios:
+
+ 1. Using paravirtualization, kernel passed in
+
+ 2. Using paravirtualization, kernel within the image
+
+ 3. Using hardware virtualization
"""
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ instance_type = instance_types.\
+ get_instance_type(instance.instance_type)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
- 'name_label': instance.name,
- 'name_description': '',
+ 'actions_after_crash': 'destroy',
+ 'actions_after_reboot': 'restart',
+ 'actions_after_shutdown': 'destroy',
+ 'affinity': '',
+ 'blocked_operations': {},
+ 'ha_always_run': False,
+ 'ha_restart_priority': '',
+ 'HVM_boot_params': {},
+ 'HVM_boot_policy': '',
'is_a_template': False,
- 'memory_static_min': '0',
- 'memory_static_max': mem,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
- 'VCPUs_at_startup': vcpus,
- 'VCPUs_max': vcpus,
- 'VCPUs_params': {},
- 'actions_after_shutdown': 'destroy',
- 'actions_after_reboot': 'restart',
- 'actions_after_crash': 'destroy',
- 'PV_bootloader': '',
- 'PV_kernel': '',
- 'PV_ramdisk': '',
+ 'memory_static_min': '0',
+ 'memory_static_max': mem,
+ 'memory_target': mem,
+ 'name_description': '',
+ 'name_label': instance.name,
+ 'other_config': {'allowvssprovider': False},
+ 'PCI_bus': '',
+ 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
+ 'viridian': 'true', 'timeoffset': '0'},
'PV_args': '',
+ 'PV_bootloader': '',
'PV_bootloader_args': '',
+ 'PV_kernel': '',
'PV_legacy_args': '',
- 'HVM_boot_policy': '',
- 'HVM_boot_params': {},
- 'platform': {},
- 'PCI_bus': '',
+ 'PV_ramdisk': '',
'recommendations': '',
- 'affinity': '',
+ 'tags': [],
'user_version': '0',
- 'other_config': {},
- }
- #Complete VM configuration record according to the image type
- #non-raw/raw with PV kernel/raw in HVM mode
- if instance.kernel_id:
- rec['PV_bootloader'] = ''
- rec['PV_kernel'] = kernel
- rec['PV_ramdisk'] = ramdisk
- rec['PV_args'] = 'root=/dev/xvda1'
- rec['PV_bootloader_args'] = ''
- rec['PV_legacy_args'] = ''
- else:
- if pv_kernel:
- rec['PV_args'] = 'noninteractive'
- rec['PV_bootloader'] = 'pygrub'
+ 'VCPUs_at_startup': vcpus,
+ 'VCPUs_max': vcpus,
+ 'VCPUs_params': {},
+ 'xenstore_data': {}}
+
+ # Complete VM configuration record according to the image type
+ # non-raw/raw with PV kernel/raw in HVM mode
+ if use_pv_kernel:
+ rec['platform']['nx'] = 'false'
+ if instance.kernel_id:
+ # 1. Kernel explicitly passed in, use that
+ rec['PV_args'] = 'root=/dev/xvda1'
+ rec['PV_kernel'] = kernel
+ rec['PV_ramdisk'] = ramdisk
else:
- rec['HVM_boot_policy'] = 'BIOS order'
- rec['HVM_boot_params'] = {'order': 'dc'}
- rec['platform'] = {'acpi': 'true', 'apic': 'true',
- 'pae': 'true', 'viridian': 'true'}
+ # 2. Use kernel within the image
+ rec['PV_args'] = 'clocksource=jiffies'
+ rec['PV_bootloader'] = 'pygrub'
+ else:
+ # 3. Using hardware virtualization
+ rec['platform']['nx'] = 'true'
+ rec['HVM_boot_params'] = {'order': 'dc'}
+ rec['HVM_boot_policy'] = 'BIOS order'
+
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
instance_name = instance.name
@@ -140,7 +169,8 @@ class VMHelper(HelperBase):
@classmethod
def ensure_free_mem(cls, session, instance):
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ instance_type = instance_types.get_instance_type(
+ instance.instance_type)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
@@ -175,13 +205,13 @@ class VMHelper(HelperBase):
@classmethod
def find_vbd_by_number(cls, session, vm_ref, number):
"""Get the VBD reference from the device number"""
- vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
- if vbds:
- for vbd in vbds:
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
try:
- vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref)
if vbd_rec['userdevice'] == str(number):
- return vbd
+ return vbd_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@@ -201,26 +231,26 @@ class VMHelper(HelperBase):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- session.wait_for_task(0, task)
+ session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
- def create_vif(cls, session, vm_ref, network_ref, mac_address):
+ def create_vif(cls, session, vm_ref, network_ref, mac_address,
+ dev, rxtx_cap=0):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
- vif_rec['device'] = '0'
+ vif_rec['device'] = str(dev)
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
vif_rec['other_config'] = {}
- vif_rec['qos_algorithm_type'] = ''
- vif_rec['qos_algorithm_params'] = {}
+ vif_rec['qos_algorithm_type'] = "ratelimit" if rxtx_cap else ''
+ vif_rec['qos_algorithm_params'] = \
+ {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {}
LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
@@ -249,24 +279,40 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
+ def get_vdi_for_vm_safely(cls, session, vm_ref):
+ vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+ if vdi_refs is None:
+ raise Exception(_("No VDIs found for VM %s") % vm_ref)
+ else:
+ num_vdis = len(vdi_refs)
+ if num_vdis != 1:
+ raise Exception(
+ _("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
+
+ vdi_ref = vdi_refs[0]
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, vdi_rec
+
+ @classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
- """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
- Snapshot VHD
- """
+ """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
% locals())
- vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
- template_vm_ref = session.wait_for_task(instance_id, task)
- template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
+ template_vm_ref = session.wait_for_task(task, instance_id)
+ template_vdi_rec = cls.get_vdi_for_vm_safely(session,
+ template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
LOG.debug(_('Created snapshot %(template_vm_ref)s from'
@@ -276,29 +322,56 @@ class VMHelper(HelperBase):
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
- return template_vm_ref, [template_vdi_uuid, parent_uuid]
+ template_vdi_uuids = {'image': parent_uuid,
+ 'snap': template_vdi_uuid}
+ return template_vm_ref, template_vdi_uuids
@classmethod
- def upload_image(cls, session, instance_id, vdi_uuids, image_id):
+ def get_sr(cls, session, sr_label='slices'):
+ """Finds the SR named by the given name label and returns
+ the UUID"""
+ return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
+
+ @classmethod
+ def get_sr_path(cls, session):
+ """Return the path to our storage repository
+
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+ @classmethod
+ def upload_image(cls, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
+ # NOTE(sirp): Currently we only support uploading images as VHD; there
+ # is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
+ os_type = instance.os_type or FLAGS.default_os_type
+
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port}
+ 'glance_port': FLAGS.glance_port,
+ 'sr_path': cls.get_sr_path(session),
+ 'os_type': os_type}
kwargs = {'params': pickle.dumps(params)}
- task = session.async_call_plugin('glance', 'put_vdis', kwargs)
- session.wait_for_task(instance_id, task)
+ task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
+ session.wait_for_task(task, instance.id)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project, type):
+ def fetch_image(cls, session, instance_id, image, user, project,
+ image_type):
"""
- type is interpreted as an ImageType instance
+ image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
@@ -308,87 +381,220 @@ class VMHelper(HelperBase):
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
- access, type)
+ access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret, type)
+ access, user.secret,
+ image_type)
+
+ @classmethod
+ def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ image_type):
+ LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
+ % locals())
+
+ sr_ref = safe_find_sr(session)
+
+ # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
+ # have the `uuid` module. To work around this, we generate the uuids
+ # here (under Python 2.6+) and pass them as arguments
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
+
+ params = {'image_id': image,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port,
+ 'uuid_stack': uuid_stack,
+ 'sr_path': cls.get_sr_path(session)}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'download_vhd', kwargs)
+ vdi_uuid = session.wait_for_task(task, instance_id)
+
+ cls.scan_sr(session, instance_id, sr_ref)
+
+ # Set the name-label to ease debugging
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ name_label = get_name_label_for_image(image)
+ session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
+
+ LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
+ % locals())
+ return vdi_uuid
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access, type):
- sr = find_sr(session)
- if sr is None:
- raise exception.NotFound('Cannot find SR to write VDI to')
+ def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ image_type):
+ """Fetch the image from Glance
+
+ NOTE:
+ Unlike _fetch_image_glance_vhd, this method does not use the Glance
+ plugin; instead, it streams the disks through domU to the VDI
+ directly.
- c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ """
+ # FIXME(sirp): Since the Glance plugin seems to be required for the
+ # VHD disk, it may be worth using the plugin for both VHD and RAW and
+ # DISK restores
+ sr_ref = safe_find_sr(session)
- meta, image_file = c.get_image(image)
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
- if type == ImageType.DISK:
+
+ if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
- vdi_size, False)
+ name_label = get_name_label_for_image(image)
+ vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
- with_vdi_attached_here(session, vdi, False,
+ with_vdi_attached_here(session, vdi_ref, False,
lambda dev:
- _stream_disk(dev, type,
+ _stream_disk(dev, image_type,
virtual_size, image_file))
- if (type == ImageType.KERNEL_RAMDISK):
+ if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
- LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
fn = "copy_kernel_vdi"
args = {}
- args['vdi-ref'] = vdi
+ args['vdi-ref'] = vdi_ref
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(instance_id, task)
+ filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
- session.get_xenapi().VDI.destroy(vdi)
- LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
+ session.get_xenapi().VDI.destroy(vdi_ref)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
return filename
else:
- return session.get_xenapi().VDI.get_uuid(vdi)
+ return session.get_xenapi().VDI.get_uuid(vdi_ref)
+
+ @classmethod
+ def determine_disk_image_type(cls, instance):
+ """Disk Image Types are used to determine where the kernel will reside
+ within an image. To figure out which type we're dealing with, we use
+ the following rules:
+
+ 1. If we're using Glance, we can use the image's disk_format field
+ to determine the image type
+
+ 2. If we're not using Glance, then we need to deduce this based on
+ whether a kernel_id is specified.
+ """
+ def log_disk_format(image_type):
+ pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ ImageType.DISK: 'DISK',
+ ImageType.DISK_RAW: 'DISK_RAW',
+ ImageType.DISK_VHD: 'DISK_VHD'}
+ disk_format = pretty_format[image_type]
+ image_id = instance.image_id
+ instance_id = instance.id
+ LOG.debug(_("Detected %(disk_format)s format for image "
+ "%(image_id)s, instance %(instance_id)s") % locals())
+
+ def determine_from_glance():
+ glance_disk_format2nova_type = {
+ 'ami': ImageType.DISK,
+ 'aki': ImageType.KERNEL_RAMDISK,
+ 'ari': ImageType.KERNEL_RAMDISK,
+ 'raw': ImageType.DISK_RAW,
+ 'vhd': ImageType.DISK_VHD}
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta = client.get_image_meta(instance.image_id)
+ disk_format = meta['disk_format']
+ try:
+ return glance_disk_format2nova_type[disk_format]
+ except KeyError:
+ raise exception.NotFound(
+ _("Unrecognized disk_format '%(disk_format)s'")
+ % locals())
+
+ def determine_from_instance():
+ if instance.kernel_id:
+ return ImageType.DISK
+ else:
+ return ImageType.DISK_RAW
+
+ # FIXME(sirp): can we unify the ImageService and xenapi_image_service
+ # abstractions?
+ if FLAGS.xenapi_image_service == 'glance':
+ image_type = determine_from_glance()
+ else:
+ image_type = determine_from_instance()
+
+ log_disk_format(image_type)
+ return image_type
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access,
+ image_type):
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(
+ session, instance_id, image, access, image_type)
+ else:
+ return cls._fetch_image_glance_disk(
+ session, instance_id, image, access, image_type)
@classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, type):
+ secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+ if image_type == ImageType.KERNEL_RAMDISK:
+ fn = 'get_kernel'
+ else:
+ fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if type != ImageType.KERNEL_RAMDISK:
+ if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
- if type == ImageType.DISK_RAW:
+ if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid = session.wait_for_task(instance_id, task)
+ uuid = session.wait_for_task(task, instance_id)
return uuid
@classmethod
- def lookup_image(cls, session, instance_id, vdi_ref):
+ def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
+ os_type):
+ """
+ Determine whether the VM will use a paravirtualized kernel or if it
+ will use hardware virtualization.
+
+ 1. Objectstore (any image type):
+ We use plugin to figure out whether the VDI uses PV
+
+ 2. Glance (VHD): then we use `os_type`, raise if not set
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
+ """
if FLAGS.xenapi_image_service == 'glance':
- return cls._lookup_image_glance(session, vdi_ref)
+ # 2, 3, 4: Glance
+ return cls._determine_is_pv_glance(
+ session, vdi_ref, disk_image_type, os_type)
else:
- return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+ # 1. Objectstore
+ return cls._determine_is_pv_objectstore(session, instance_id,
+ vdi_ref)
@classmethod
- def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
+ def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(instance_id, task)
+ pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
@@ -398,58 +604,93 @@ class VMHelper(HelperBase):
return pv
@classmethod
- def _lookup_image_glance(cls, session, vdi_ref):
+ def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
+ os_type):
+ """
+ For a Glance image, determine if we need paravirtualization.
+
+ The relevant scenarios are:
+ 2. Glance (VHD): then we use `os_type`, raise if not set
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
+ """
+
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
+ if disk_image_type == ImageType.DISK_VHD:
+ # 2. VHD
+ if os_type == 'windows':
+ is_pv = False
+ else:
+ is_pv = True
+ elif disk_image_type == ImageType.DISK_RAW:
+ # 3. RAW
+ is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+ elif disk_image_type == ImageType.DISK:
+ # 4. Disk
+ is_pv = True
+ else:
+ raise exception.Error(_("Unknown image format %(disk_image_type)s")
+ % locals())
- def is_vdi_pv(dev):
- LOG.debug(_("Running pygrub against %s"), dev)
- output = os.popen('pygrub -qn /dev/%s' % dev)
- for line in output.readlines():
- #try to find kernel string
- m = re.search('(?<=kernel:)/.*(?:>)', line)
- if m and m.group(0).find('xen') != -1:
- LOG.debug(_("Found Xen kernel %s") % m.group(0))
- return True
- LOG.debug(_("No Xen kernel found. Booting HVM."))
- return False
- return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)
+ return is_pv
@classmethod
- def lookup(cls, session, i):
+ def lookup(cls, session, name_label):
"""Look the instance i up, and returns it if available"""
- vms = session.get_xenapi().VM.get_by_name_label(i)
- n = len(vms)
+ vm_refs = session.get_xenapi().VM.get_by_name_label(name_label)
+ n = len(vm_refs)
if n == 0:
return None
elif n > 1:
- raise exception.Duplicate(_('duplicate name found: %s') % i)
+ raise exception.Duplicate(_('duplicate name found: %s') %
+ name_label)
else:
- return vms[0]
+ return vm_refs[0]
@classmethod
- def lookup_vm_vdis(cls, session, vm):
+ def lookup_vm_vdis(cls, session, vm_ref):
"""Look for the VDIs that are attached to the VM"""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
- vbds = session.get_xenapi().VM.get_VBDs(vm)
- vdis = []
- if vbds:
- for vbd in vbds:
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ vdi_refs = []
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
try:
- vdi = session.get_xenapi().VBD.get_VDI(vbd)
+ vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
# Test valid VDI
- record = session.get_xenapi().VDI.get_record(vdi)
+ record = session.get_xenapi().VDI.get_record(vdi_ref)
LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
else:
- vdis.append(vdi)
- if len(vdis) > 0:
- return vdis
+ vdi_refs.append(vdi_ref)
+ if len(vdi_refs) > 0:
+ return vdi_refs
else:
return None
@classmethod
+ def preconfigure_instance(cls, session, instance, vdi_ref, network_info):
+ """Makes alterations to the image before launching as part of spawn.
+ """
+
+ # As mounting the image VDI is expensive, we only want to do it once,
+ # if at all, so determine whether it's required first, and then do
+ # everything
+ mount_required = False
+ key, net = _prepare_injectables(instance, network_info)
+ mount_required = key or net
+ if not mount_required:
+ return
+
+ with_vdi_attached_here(session, vdi_ref, False,
+ lambda dev: _mounted_processing(dev, key, net))
+
+ @classmethod
def lookup_kernel_ramdisk(cls, session, vm):
vm_rec = session.get_xenapi().VM.get_record(vm)
if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
@@ -496,6 +737,21 @@ class VMHelper(HelperBase):
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
+ @classmethod
+ def scan_sr(cls, session, instance_id=None, sr_ref=None):
+ """Scans the SR specified by sr_ref"""
+ if sr_ref:
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
+ task = session.call_xenapi('Async.SR.scan', sr_ref)
+ session.wait_for_task(task, instance_id)
+
+ @classmethod
+ def scan_default_sr(cls, session):
+ """Looks for the system default SR and triggers a re-scan"""
+ #FIXME(sirp/mdietz): refactor scan_default_sr in there
+ sr_ref = cls.get_sr(session)
+ session.call_xenapi('SR.scan', sr_ref)
+
def get_rrd(host, uuid):
"""Return the VM RRD XML as a string"""
@@ -538,12 +794,6 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
-def scan_sr(session, instance_id, sr_ref):
- LOG.debug(_("Re-scanning SR %s"), sr_ref)
- task = session.call_xenapi('Async.SR.scan', sr_ref)
- session.wait_for_task(instance_id, task)
-
-
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
@@ -568,7 +818,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
" %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
- scan_sr(session, instance_id, sr_ref)
+ VMHelper.scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
@@ -599,18 +849,29 @@ def get_vdi_for_vm_safely(session, vm_ref):
return vdi_ref, vdi_rec
+def safe_find_sr(session):
+ """Same as find_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ return sr_ref
+
+
def find_sr(session):
+ """Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
- srs = session.get_xenapi().SR.get_all()
- for sr in srs:
- sr_rec = session.get_xenapi().SR.get_record(sr)
+ sr_refs = session.get_xenapi().SR.get_all()
+ for sr_ref in sr_refs:
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
if not ('i18n-key' in sr_rec['other_config'] and
sr_rec['other_config']['i18n-key'] == 'local-storage'):
continue
- for pbd in sr_rec['PBDs']:
- pbd_rec = session.get_xenapi().PBD.get_record(pbd)
+ for pbd_ref in sr_rec['PBDs']:
+ pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
if pbd_rec['host'] == host:
- return sr
+ return sr_ref
return None
@@ -635,11 +896,11 @@ def remap_vbd_dev(dev):
return remapped_dev
-def with_vdi_attached_here(session, vdi, read_only, f):
+def with_vdi_attached_here(session, vdi_ref, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
vbd_rec['VM'] = this_vm_ref
- vbd_rec['VDI'] = vdi
+ vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = 'autodetect'
vbd_rec['bootable'] = False
vbd_rec['mode'] = read_only and 'RO' or 'RW'
@@ -650,28 +911,28 @@ def with_vdi_attached_here(session, vdi, read_only, f):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
- vbd = session.get_xenapi().VBD.create(vbd_rec)
- LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
+ LOG.debug(_('Creating VBD for VDI %s ... '), vdi_ref)
+ vbd_ref = session.get_xenapi().VBD.create(vbd_rec)
+ LOG.debug(_('Creating VBD for VDI %s done.'), vdi_ref)
try:
- LOG.debug(_('Plugging VBD %s ... '), vbd)
- session.get_xenapi().VBD.plug(vbd)
- LOG.debug(_('Plugging VBD %s done.'), vbd)
- orig_dev = session.get_xenapi().VBD.get_device(vbd)
- LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals())
+ LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
+ session.get_xenapi().VBD.plug(vbd_ref)
+ LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
+ orig_dev = session.get_xenapi().VBD.get_device(vbd_ref)
+ LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals())
dev = remap_vbd_dev(orig_dev)
if dev != orig_dev:
- LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
+ LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s') % locals())
return f(dev)
finally:
- LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
- vbd_unplug_with_retry(session, vbd)
- ignore_failure(session.get_xenapi().VBD.destroy, vbd)
- LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
+ LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
+ vbd_unplug_with_retry(session, vbd_ref)
+ ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref)
+ LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)
-def vbd_unplug_with_retry(session, vbd):
+def vbd_unplug_with_retry(session, vbd_ref):
"""Call VBD.unplug on the given VBD, with a retry if we get
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
@@ -679,7 +940,7 @@ def vbd_unplug_with_retry(session, vbd):
# FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
while True:
try:
- session.get_xenapi().VBD.unplug(vbd)
+ session.get_xenapi().VBD.unplug(vbd_ref)
LOG.debug(_('VBD.unplug successful first time.'))
return
except VMHelper.XenAPI.Failure, e:
@@ -687,6 +948,7 @@ def vbd_unplug_with_retry(session, vbd):
e.details[0] == 'DEVICE_DETACH_REJECTED'):
LOG.debug(_('VBD.unplug rejected: retrying...'))
time.sleep(1)
+ LOG.debug(_('Not sleeping anymore!'))
elif (len(e.details) > 0 and
e.details[0] == 'DEVICE_ALREADY_DETACHED'):
LOG.debug(_('VBD.unplug successful eventually.'))
@@ -714,9 +976,22 @@ def get_this_vm_ref(session):
return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
-def _stream_disk(dev, type, virtual_size, image_file):
+def _is_vdi_pv(dev):
+ LOG.debug(_("Running pygrub against %s"), dev)
+ output = os.popen('pygrub -qn /dev/%s' % dev)
+ for line in output.readlines():
+ # try to find kernel string
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
+ if m and m.group(0).find('xen') != -1:
+ LOG.debug(_("Found Xen kernel %s") % m.group(0))
+ return True
+ LOG.debug(_("No Xen kernel found. Booting HVM."))
+ return False
+
+
+def _stream_disk(dev, image_type, virtual_size, image_file):
offset = 0
- if type == ImageType.DISK:
+ if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
@@ -735,13 +1010,132 @@ def _write_partition(virtual_size, dev):
LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dest)s...') % locals())
- def execute(cmd, process_input=None, check_exit_code=True):
- return utils.execute(cmd=cmd,
- process_input=process_input,
- check_exit_code=check_exit_code)
+ def execute(*cmd, **kwargs):
+ return utils.execute(*cmd, **kwargs)
- execute('parted --script %s mklabel msdos' % dest)
- execute('parted --script %s mkpart primary %ds %ds' %
- (dest, primary_first, primary_last))
+ execute('sudo', 'parted', '--script', dest, 'mklabel', 'msdos')
+ execute('sudo', 'parted', '--script', dest, 'mkpart', 'primary',
+ '%ds' % primary_first,
+ '%ds' % primary_last)
LOG.debug(_('Writing partition table %s done.'), dest)
+
+
+def get_name_label_for_image(image):
+ # TODO(sirp): This should eventually be the URI for the Glance image
+ return _('Glance image %s') % image
+
+
+def _mount_filesystem(dev_path, dir):
+ """mounts the device specified by dev_path in dir"""
+ try:
+ out, err = utils.execute('sudo', 'mount',
+ '-t', 'ext2,ext3',
+ dev_path, dir)
+ except exception.ProcessExecutionError as e:
+ err = str(e)
+ return err
+
+
+def _find_guest_agent(base_dir, agent_rel_path):
+ """
+ tries to locate a guest agent at the path
+ specified by agent_rel_path
+ """
+ agent_path = os.path.join(base_dir, agent_rel_path)
+ if os.path.isfile(agent_path):
+ # The presence of the guest agent
+ # file indicates that this instance can
+ # reconfigure the network from xenstore data,
+ # so manipulation of files in /etc is not
+ # required
+ LOG.info(_('XenServer tools installed in this '
+ 'image are capable of network injection. '
+ 'Networking files will not be '
+ 'manipulated'))
+ return True
+ xe_daemon_filename = os.path.join(base_dir,
+ 'usr', 'sbin', 'xe-daemon')
+ if os.path.isfile(xe_daemon_filename):
+ LOG.info(_('XenServer tools are present '
+ 'in this image but are not capable '
+ 'of network injection'))
+ else:
+ LOG.info(_('XenServer tools are not '
+ 'installed in this image'))
+ return False
+
+
+def _mounted_processing(device, key, net):
+ """Callback which runs with the image VDI attached"""
+
+ dev_path = '/dev/' + device + '1' # NB: Partition 1 hardcoded
+ tmpdir = tempfile.mkdtemp()
+ try:
+ # Mount only Linux filesystems, to avoid disturbing NTFS images
+ err = _mount_filesystem(dev_path, tmpdir)
+ if not err:
+ try:
+ # This try block ensures that the umount occurs
+ if not _find_guest_agent(tmpdir, FLAGS.xenapi_agent_path):
+ LOG.info(_('Manipulating interface files '
+ 'directly'))
+ disk.inject_data_into_fs(tmpdir, key, net,
+ utils.execute)
+ finally:
+ utils.execute('sudo', 'umount', dev_path)
+ else:
+ LOG.info(_('Failed to mount filesystem (expected for '
+ 'non-linux instances): %s') % err)
+ finally:
+ # remove temporary directory
+ os.rmdir(tmpdir)
+
+
+def _prepare_injectables(inst, networks_info):
+ """
+ Prepares the SSH key and the network configuration file to be
+ injected into the disk image
+ """
+ # do the import here - Cheetah.Template will be loaded
+ # only if injection is performed
+ from Cheetah import Template as t
+ template = t.Template
+ template_data = open(FLAGS.injected_network_template).read()
+
+ key = str(inst['key_data'])
+ net = None
+ if networks_info:
+ ifc_num = -1
+ interfaces_info = []
+ have_injected_networks = False
+ for (network_ref, info) in networks_info:
+ ifc_num += 1
+ if not network_ref['injected']:
+ continue
+
+ have_injected_networks = True
+ ip_v4 = ip_v6 = None
+ if 'ips' in info and len(info['ips']) > 0:
+ ip_v4 = info['ips'][0]
+ if 'ip6s' in info and len(info['ip6s']) > 0:
+ ip_v6 = info['ip6s'][0]
+ if len(info['dns']) > 0:
+ dns = info['dns'][0]
+ interface_info = {'name': 'eth%d' % ifc_num,
+ 'address': ip_v4 and ip_v4['ip'] or '',
+ 'netmask': ip_v4 and ip_v4['netmask'] or '',
+ 'gateway': info['gateway'],
+ 'broadcast': info['broadcast'],
+ 'dns': dns,
+ 'address_v6': ip_v6 and ip_v6['ip'] or '',
+ 'netmask_v6': ip_v6 and ip_v6['netmask'] or '',
+ 'gateway_v6': ip_v6 and ip_v6['gateway'] or '',
+ 'use_ipv6': FLAGS.use_ipv6}
+ interfaces_info.append(interface_info)
+
+ if have_injected_networks:
+ net = str(template(template_data,
+ searchList=[{'interfaces': interfaces_info,
+ 'use_ipv6': FLAGS.use_ipv6}]))
+ return key, net
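
For reference, a minimal sketch of the data _prepare_injectables builds for the Cheetah template above; the addresses are invented, only the key names mirror the code:

    # One entry of interfaces_info; values here are made up for illustration,
    # only the key names follow _prepare_injectables above.
    example_interface = {
        'name': 'eth0',
        'address': '10.0.0.3',
        'netmask': '255.255.255.0',
        'gateway': '10.0.0.1',
        'broadcast': '10.0.0.255',
        'dns': '10.0.0.2',
        'address_v6': '',
        'netmask_v6': '',
        'gateway_v6': '',
        'use_ipv6': False,
    }

    # The Cheetah template is rendered with a searchList shaped like this:
    search_list = [{'interfaces': [example_interface], 'use_ipv6': False}]
    print(search_list)
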
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2aa0dde70..c96c35a6e 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -19,9 +19,11 @@
Management class for VM-related functions (spawn, reboot, etc).
"""
+import base64
import json
import M2Crypto
import os
+import pickle
import subprocess
import tempfile
import uuid
@@ -31,15 +33,18 @@ from nova import context
from nova import log as logging
from nova import exception
from nova import utils
+from nova import flags
from nova.auth.manager import AuthManager
from nova.compute import power_state
+from nova.virt import driver
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
XenAPI = None
LOG = logging.getLogger("nova.virt.xenapi.vmops")
+FLAGS = flags.FLAGS
class VMOps(object):
@@ -49,85 +54,153 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
+ self.poll_rescue_last_ran = None
VMHelper.XenAPI = self.XenAPI
def list_instances(self):
- """List VM instances"""
- vms = []
- for vm in self._session.get_xenapi().VM.get_all():
- rec = self._session.get_xenapi().VM.get_record(vm)
- if not rec["is_a_template"] and not rec["is_control_domain"]:
- vms.append(rec["name_label"])
- return vms
-
- def spawn(self, instance):
- """Create VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
- if vm is not None:
+ """List VM instances."""
+ # TODO(justinsb): Should we just always use the details method?
+ # Seems to be the same number of API calls..
+ vm_refs = []
+ for vm_ref in self._session.get_xenapi().VM.get_all():
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
+ vm_refs.append(vm_rec["name_label"])
+ return vm_refs
+
+ def list_instances_detail(self):
+ """List VM instances, returning InstanceInfo objects."""
+ instance_infos = []
+ for vm_ref in self._session.get_xenapi().VM.get_all():
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
+ name = vm_rec["name_label"]
+
+ # TODO(justinsb): This is a roundabout way to map the state
+ openstack_format = VMHelper.compile_info(vm_rec)
+ state = openstack_format['state']
+
+ instance_info = driver.InstanceInfo(name, state)
+ instance_infos.append(instance_info)
+ return instance_infos
+
+ def revert_resize(self, instance):
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ self._start(instance, vm_ref)
+
+ def finish_resize(self, instance, disk_info):
+ vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
+ disk_info['cow'])
+ vm_ref = self._create_vm(instance, vdi_uuid)
+ self.resize_instance(instance, vdi_uuid)
+ self._spawn(instance, vm_ref)
+
+ def _start(self, instance, vm_ref=None):
+ """Power on a VM instance"""
+ if not vm_ref:
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ if vm_ref is None:
+ raise exception.Error(_('Attempted to power on non-existent instance:'
+ ' bad instance id %s') % instance.id)
+ LOG.debug(_("Starting instance %s"), instance.name)
+ self._session.call_xenapi('VM.start', vm_ref, False, False)
+
+ def _create_disk(self, instance):
+ user = AuthManager().get_user(instance.user_id)
+ project = AuthManager().get_project(instance.project_id)
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+ vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
+ instance.image_id, user, project, disk_image_type)
+ return vdi_uuid
+
+ def spawn(self, instance, network_info=None):
+ vdi_uuid = self._create_disk(instance)
+ vm_ref = self._create_vm(instance, vdi_uuid, network_info)
+ self._spawn(instance, vm_ref)
+
+ def spawn_rescue(self, instance):
+ """Spawn a rescue instance."""
+ self.spawn(instance)
+
+ def _create_vm(self, instance, vdi_uuid, network_info=None):
+ """Create VM instance."""
+ instance_name = instance.name
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is not None:
raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance.name)
+ ' non-unique name %s') % instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
- name = instance['name']
- LOG.exception(_('instance %(name)s: not enough free memory')
- % locals())
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- return
+ LOG.exception(_('instance %(instance_name)s: not enough free '
+ 'memory') % locals())
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ return
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- #if kernel is not present we must download a raw disk
- if instance.kernel_id:
- disk_image_type = ImageType.DISK
- else:
- disk_image_type = ImageType.DISK_RAW
- vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
- instance.image_id, user, project, disk_image_type)
+ # Are we building from a pre-existing disk?
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- #Have a look at the VDI and see if it has a PV kernel
- pv_kernel = False
- if not instance.kernel_id:
- pv_kernel = VMHelper.lookup_image(self._session, instance.id,
- vdi_ref)
+
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+
ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
- vm_ref = VMHelper.create_vm(self._session,
- instance, kernel, ramdisk, pv_kernel)
- VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
- # inject_network_info and create vifs
- networks = self.inject_network_info(instance)
- self.create_vifs(instance, networks)
+ use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
+ vdi_ref, disk_image_type, instance.os_type)
+ vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
+ use_pv_kernel)
+
+ VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=vdi_ref, userdevice=0, bootable=True)
+
+ # TODO(tr3buchet) - check to make sure we have network info, otherwise
+ # create it now. This goes away once nova-multi-nic hits.
+ if network_info is None:
+ network_info = self._get_network_info(instance)
+
+ # Alter the image before VM start, e.g. for network injection
+ if FLAGS.xenapi_inject_image:
+ VMHelper.preconfigure_instance(self._session, instance,
+ vdi_ref, network_info)
+ self.create_vifs(vm_ref, network_info)
+ self.inject_network_info(instance, vm_ref, network_info)
+ return vm_ref
+
+ def _spawn(self, instance, vm_ref):
+ """Spawn a new instance."""
LOG.debug(_('Starting VM %s...'), vm_ref)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
+ self._start(instance, vm_ref)
instance_name = instance.name
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
- % locals())
+ % locals())
- def _inject_onset_files():
- onset_files = instance.onset_files
- if onset_files:
+ def _inject_files():
+ injected_files = instance.injected_files
+ if injected_files:
# Check if this is a JSON-encoded string and convert if needed.
- if isinstance(onset_files, basestring):
+ if isinstance(injected_files, basestring):
try:
- onset_files = json.loads(onset_files)
+ injected_files = json.loads(injected_files)
except ValueError:
- LOG.exception(_("Invalid value for onset_files: '%s'")
- % onset_files)
- onset_files = []
+ LOG.exception(
+ _("Invalid value for injected_files: '%s'")
+ % injected_files)
+ injected_files = []
# Inject any files, if specified
- for path, contents in instance.onset_files:
+ for path, contents in instance.injected_files:
LOG.debug(_("Injecting file path: '%s'") % path)
self.inject_file(instance, path, contents)
# NOTE(armando): Do we really need to do this in virt?
@@ -137,18 +210,18 @@ class VMOps(object):
def _wait_for_boot():
try:
- state = self.get_info(instance['name'])['state']
+ state = self.get_info(instance_name)['state']
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- LOG.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
- _inject_onset_files()
+ _inject_files()
return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
+ instance_name)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -158,46 +231,55 @@ class VMOps(object):
timer.f = _wait_for_boot
# call to reset network to configure network from xenstore
- self.reset_network(instance)
+ self.reset_network(instance, vm_ref)
return timer.start(interval=0.5, now=True)
def _get_vm_opaque_ref(self, instance_or_vm):
- """Refactored out the common code of many methods that receive either
+ """
+ Refactored out the common code of many methods that receive either
a vm name or a vm instance, and want a vm instance in return.
"""
- vm = None
- try:
- if instance_or_vm.startswith("OpaqueRef:"):
- # Got passed an opaque ref; return it
+ # if instance_or_vm is a string it must be opaque ref or instance name
+ if isinstance(instance_or_vm, basestring):
+ obj = None
+ try:
+ # check for opaque ref
+ obj = self._session.get_xenapi().VM.get_uuid(instance_or_vm)
return instance_or_vm
- else:
- # Must be the instance name
+ except self.XenAPI.Failure:
+ # wasn't an opaque ref, can be an instance name
instance_name = instance_or_vm
- except (AttributeError, KeyError):
- # Note the the KeyError will only happen with fakes.py
- # Not a string; must be an ID or a vm instance
- if isinstance(instance_or_vm, (int, long)):
- ctx = context.get_admin_context()
- try:
- instance_obj = db.instance_get(ctx, instance_or_vm)
- instance_name = instance_obj.name
- except exception.NotFound:
- # The unit tests screw this up, as they use an integer for
- # the vm name. I'd fix that up, but that's a matter for
- # another bug report. So for now, just try with the passed
- # value
- instance_name = instance_or_vm
- else:
- instance_name = instance_or_vm.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
+
+ # if instance_or_vm is an int/long it must be instance id
+ elif isinstance(instance_or_vm, (int, long)):
+ ctx = context.get_admin_context()
+ instance_obj = db.instance_get(ctx, instance_or_vm)
+ instance_name = instance_obj.name
+ else:
+ instance_name = instance_or_vm.name
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
raise exception.NotFound(
_('Instance not present %s') % instance_name)
- return vm
+ return vm_ref
+
+ def _acquire_bootlock(self, vm):
+ """Prevent an instance from booting."""
+ self._session.call_xenapi(
+ "VM.set_blocked_operations",
+ vm,
+ {"start": ""})
+
+ def _release_bootlock(self, vm):
+ """Allow an instance to boot."""
+ self._session.call_xenapi(
+ "VM.remove_from_blocked_operations",
+ vm,
+ "start")
def snapshot(self, instance, image_id):
- """ Create snapshot from a running VM instance
+ """Create snapshot from a running VM instance.
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
@@ -217,8 +299,22 @@ class VMOps(object):
3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
that will bundle the VHDs together and then push the bundle into
Glance.
+
"""
+ template_vm_ref = None
+ try:
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ # call plugin to ship snapshot off to glance
+ VMHelper.upload_image(
+ self._session, instance, template_vdi_uuids, image_id)
+ finally:
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
+
+ logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+ def _get_snapshot(self, instance):
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
@@ -229,33 +325,116 @@ class VMOps(object):
try:
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
+ return template_vm_ref, template_vdi_uuids
except self.XenAPI.Failure, exc:
logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
% locals())
return
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Copies a VHD from one host machine to another.
+
+ :param instance: the instance that owns the VHD in question.
+ :param dest: the destination host machine.
+
+ """
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ # The primary VDI becomes the COW after the snapshot, and we can
+ # identify it via the VBD. The base copy is the parent_uuid returned
+ # from the snapshot creation
+
+ base_copy_uuid = cow_uuid = None
+ template_vdi_uuids = template_vm_ref = None
try:
- # call plugin to ship snapshot off to glance
- VMHelper.upload_image(
- self._session, instance.id, template_vdi_uuids, image_id)
+ # transfer the base copy
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ base_copy_uuid = template_vdi_uuids['image']
+ vdi_ref, vm_vdi_rec = \
+ VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
+ cow_uuid = vm_vdi_rec['uuid']
+
+ params = {'host': dest,
+ 'vdi_uuid': base_copy_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session)}
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now power down the instance and transfer the COW VHD
+ self._shutdown(instance, vm_ref, hard=False)
+
+ params = {'host': dest,
+ 'vdi_uuid': cow_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
finally:
- self._destroy(instance, template_vm_ref, shutdown=False)
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
- logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+ # TODO(mdietz): we could also consider renaming these to something
+ # sensible so we don't need to blindly pass around dictionaries
+ return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+
+ def link_disks(self, instance, base_copy_uuid, cow_uuid):
+ """Links the base copy VHD to the COW via the XAPI plugin."""
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ new_base_copy_uuid = str(uuid.uuid4())
+ new_cow_uuid = str(uuid.uuid4())
+ params = {'instance_id': instance.id,
+ 'old_base_copy_uuid': base_copy_uuid,
+ 'old_cow_uuid': cow_uuid,
+ 'new_base_copy_uuid': new_base_copy_uuid,
+ 'new_cow_uuid': new_cow_uuid,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration',
+ 'move_vhds_into_sr', {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now we rescan the SR so we find the VHDs
+ VMHelper.scan_default_sr(self._session)
+
+ return new_cow_uuid
+
+ def resize_instance(self, instance, vdi_uuid):
+ """Resize a running instance by changing it's RAM and disk size."""
+ # TODO(mdietz): this will need to be adjusted for swap later
+ # The new disk size must be in bytes
+
+ new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024)
+ instance_name = instance.name
+ instance_local_gb = instance.local_gb
+ LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s."
+ " Expanding to %(instance_local_gb)d GB") % locals())
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size)
+ LOG.debug(_("Resize instance %s complete") % (instance.name))
def reboot(self, instance):
- """Reboot VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
- self._session.wait_for_task(instance.id, task)
+ """Reboot VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
+ self._session.wait_for_task(task, instance.id)
def set_admin_password(self, instance, new_pass):
- """Set the root/admin password on the VM instance. This is done via
- an agent running on the VM. Communication between nova and the agent
- is done via writing xenstore records. Since communication is done over
- the XenAPI RPC calls, we need to encrypt the password. We're using a
- simple Diffie-Hellman class instead of the more advanced one in
- M2Crypto for compatibility with the agent code.
+ """Set the root/admin password on the VM instance.
+
+ This is done via an agent running on the VM. Communication between nova
+ and the agent is done via writing xenstore records. Since communication
+ is done over the XenAPI RPC calls, we need to encrypt the password.
+ We're using a simple Diffie-Hellman class instead of the more advanced
+ one in M2Crypto for compatibility with the agent code.
+
"""
# Need to uniquely identify this request.
transaction_id = str(uuid.uuid4())
@@ -287,17 +466,19 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def inject_file(self, instance, b64_path, b64_contents):
- """Write a file to the VM instance. The path to which it is to be
- written and the contents of the file need to be supplied; both should
- be base64-encoded to prevent errors with non-ASCII characters being
- transmitted. If the agent does not support file injection, or the user
- has disabled it, a NotImplementedError will be raised.
+ def inject_file(self, instance, path, contents):
+ """Write a file to the VM instance.
+
+ The path to which it is to be written and the contents of the file
+ need to be supplied; both will be base64-encoded to prevent errors
+ with non-ASCII characters being transmitted. If the agent does not
+ support file injection, or the user has disabled it, a
+ NotImplementedError will be raised.
+
"""
- # Files/paths *should* be base64-encoded at this point, but
- # double-check to make sure.
- b64_path = utils.ensure_b64_encoding(b64_path)
- b64_contents = utils.ensure_b64_encoding(b64_contents)
+ # Files/paths must be base64-encoded for transmission to agent
+ b64_path = base64.b64encode(path)
+ b64_contents = base64.b64encode(contents)
# Need to uniquely identify this request.
transaction_id = str(uuid.uuid4())
@@ -313,217 +494,398 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _shutdown(self, instance, vm):
- """Shutdown an instance """
+ def _shutdown(self, instance, vm_ref, hard=True):
+ """Shutdown an instance."""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
- LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
- locals())
+ instance_name = instance.name
+ LOG.warn(_("VM %(instance_name)s already halted,"
+ "skipping shutdown...") % locals())
return
+ instance_id = instance.id
+ LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
+ % locals())
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
- self._session.wait_for_task(instance.id, task)
+ task = None
+ if hard:
+ task = self._session.call_xenapi("Async.VM.hard_shutdown",
+ vm_ref)
+ else:
+ task = self._session.call_xenapi("Async.VM.clean_shutdown",
+ vm_ref)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- def _destroy_vdis(self, instance, vm):
- """Destroys all VDIs associated with a VM """
- vdis = VMHelper.lookup_vm_vdis(self._session, vm)
+ def _shutdown_rescue(self, rescue_vm_ref):
+ """Shutdown a rescue instance."""
+ self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm_ref)
+
+ def _destroy_vdis(self, instance, vm_ref):
+ """Destroys all VDIs associated with a VM."""
+ instance_id = instance.id
+ LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
+ % locals())
+ vdi_refs = VMHelper.lookup_vm_vdis(self._session, vm_ref)
- if not vdis:
+ if not vdi_refs:
return
- for vdi in vdis:
+ for vdi_ref in vdi_refs:
try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- def _destroy_vm(self, instance, vm):
- """Destroys a VM record """
+ def _destroy_rescue_vdis(self, rescue_vm_ref):
+ """Destroys all VDIs associated with a rescued VM."""
+ vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
+ for vdi_ref in vdi_refs:
+ try:
+ self._session.call_xenapi("Async.VDI.destroy", vdi_ref)
+ except self.XenAPI.Failure:
+ continue
+
+ def _destroy_rescue_vbds(self, rescue_vm_ref):
+ """Destroys all VBDs tied to a rescue VM."""
+ vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
+ for vbd_ref in vbd_refs:
+ vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref)
+ if vbd_rec.get("userdevice", None) == "1":  # original disk is always userdevice 1
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+
+ def _destroy_kernel_ramdisk(self, instance, vm_ref):
+ """Three situations can occur:
+
+ 1. We have neither a ramdisk nor a kernel, in which case we are a
+ RAW image and can omit this step
+
+ 2. We have one or the other, in which case, we should flag as an
+ error
+
+ 3. We have both, in which case we safely remove both the kernel
+ and the ramdisk.
+
+ """
+ instance_id = instance.id
+ if not instance.kernel_id and not instance.ramdisk_id:
+ # 1. No kernel or ramdisk
+ LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+ "skipping kernel and ramdisk deletion") % locals())
+ return
+
+ if not (instance.kernel_id and instance.ramdisk_id):
+ # 2. We only have kernel xor ramdisk
+ raise exception.NotFound(
+ _("Instance %(instance_id)s has a kernel or ramdisk but not "
+ "both" % locals()))
+
+ # 3. We have both kernel and ramdisk
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
+ vm_ref)
+
+ LOG.debug(_("Removing kernel/ramdisk files"))
+
+ args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(task, instance.id)
+
+ LOG.debug(_("kernel/ramdisk files removed"))
+
+ def _destroy_vm(self, instance, vm_ref):
+ """Destroys a VM record."""
+ instance_id = instance.id
try:
- kernel = None
- ramdisk = None
- if instance.kernel_id or instance.ramdisk_id:
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
- self._session, vm)
- task1 = self._session.call_xenapi('Async.VM.destroy', vm)
- LOG.debug(_("Removing kernel/ramdisk files"))
- fn = "remove_kernel_ramdisk"
- args = {}
- if kernel:
- args['kernel-file'] = kernel
- if ramdisk:
- args['ramdisk-file'] = ramdisk
- task2 = self._session.async_call_plugin('glance', fn, args)
- self._session.wait_for_task(instance.id, task1)
- self._session.wait_for_task(instance.id, task2)
- LOG.debug(_("kernel/ramdisk files removed"))
+ task = self._session.call_xenapi('Async.VM.destroy', vm_ref)
+ self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
+ def _destroy_rescue_instance(self, rescue_vm_ref):
+ """Destroy a rescue instance."""
+ self._destroy_rescue_vbds(rescue_vm_ref)
+ self._shutdown_rescue(rescue_vm_ref)
+ self._destroy_rescue_vdis(rescue_vm_ref)
+
+ self._session.call_xenapi("Async.VM.destroy", rescue_vm_ref)
+
def destroy(self, instance):
- """
- Destroy VM instance
+ """Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
- """
- vm = VMHelper.lookup(self._session, instance.name)
- return self._destroy(instance, vm, shutdown=True)
- def _destroy(self, instance, vm, shutdown=True):
"""
- Destroys VM instance by performing:
+ instance_id = instance.id
+ LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm_ref, shutdown=True)
+
+ def _destroy(self, instance, vm_ref, shutdown=True,
+ destroy_kernel_ramdisk=True):
+ """Destroys VM instance by performing:
+
+ 1. A shutdown if requested.
+ 2. Destroying associated VDIs.
+ 3. Destroying kernel and ramdisk files (if necessary).
+ 4. Destroying the actual VM record.
- 1. A shutdown if requested
- 2. Destroying associated VDIs
- 3. Destroying that actual VM record
"""
- if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ if vm_ref is None:
+ LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
- self._shutdown(instance, vm)
+ self._shutdown(instance, vm_ref)
- self._destroy_vdis(instance, vm)
- self._destroy_vm(instance, vm)
+ self._destroy_vdis(instance, vm_ref)
+ if destroy_kernel_ramdisk:
+ self._destroy_kernel_ramdisk(instance, vm_ref)
+ self._destroy_vm(instance, vm_ref)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
callback(ret)
def pause(self, instance, callback):
- """Pause VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.pause', vm)
+ """Pause VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.pause', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def unpause(self, instance, callback):
- """Unpause VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.unpause', vm)
+ """Unpause VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.unpause', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def suspend(self, instance, callback):
- """suspend the specified instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.suspend', vm)
+ """Suspend the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.suspend', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def resume(self, instance, callback):
- """resume the specified instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
+ """Resume the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.resume', vm_ref, False,
+ True)
self._wait_with_callback(instance.id, task, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance.
+
+ - shutdown the instance VM.
+ - set 'bootlock' to prevent the instance from starting in rescue.
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue).
+
+ """
+ rescue_vm_ref = VMHelper.lookup(self._session,
+ "%s-rescue" % instance.name)
+ if rescue_vm_ref:
+ raise RuntimeError(_(
+ "Instance is already in Rescue Mode: %s" % instance.name))
+
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ self._shutdown(instance, vm_ref)
+ self._acquire_bootlock(vm_ref)
+ instance._rescue = True
+ self.spawn_rescue(instance)
+ rescue_vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
+ rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
+ vdi_ref, 1, False)
+
+ self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance.
+
+ - unplug the instance VM's disk from the rescue VM.
+ - teardown the rescue VM.
+ - release the bootlock to allow the instance VM to start.
+
+ """
+ rescue_vm_ref = VMHelper.lookup(self._session,
+ "%s-rescue" % instance.name)
+
+ if not rescue_vm_ref:
+ raise exception.NotFound(_(
+ "Instance is not in Rescue Mode: %s" % instance.name))
+
+ original_vm_ref = VMHelper.lookup(self._session, instance.name)
+ instance._rescue = False
+
+ self._destroy_rescue_instance(rescue_vm_ref)
+ self._release_bootlock(original_vm_ref)
+ self._start(instance, original_vm_ref)
+
+ def poll_rescued_instances(self, timeout):
+ """Look for expirable rescued instances.
+
+ - forcibly exit rescue mode for any instances that have been
+ in rescue mode for >= the provided timeout
+
+ """
+ last_ran = self.poll_rescue_last_ran
+ if not last_ran:
+ # We need a base time to start tracking.
+ self.poll_rescue_last_ran = utils.utcnow()
+ return
+
+ if not utils.is_older_than(last_ran, timeout):
+ # Do not run. Let's bail.
+ return
+
+ # Update the time tracker and proceed.
+ self.poll_rescue_last_ran = utils.utcnow()
+
+ rescue_vms = []
+ for instance in self.list_instances():
+ if instance.endswith("-rescue"):
+ rescue_vms.append(dict(name=instance,
+ vm_ref=VMHelper.lookup(self._session,
+ instance)))
+
+ for vm in rescue_vms:
+ rescue_name = vm["name"]
+ rescue_vm_ref = vm["vm_ref"]
+
+ self._destroy_rescue_instance(rescue_vm_ref)
+
+ original_name = vm["name"].split("-rescue", 1)[0]
+ original_vm_ref = VMHelper.lookup(self._session, original_name)
+
+ self._release_bootlock(original_vm_ref)
+ self._session.call_xenapi("VM.start", original_vm_ref, False,
+ False)
+
def get_info(self, instance):
- """Return data about VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- rec = self._session.get_xenapi().VM.get_record(vm)
- return VMHelper.compile_info(rec)
+ """Return data about VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ return VMHelper.compile_info(vm_rec)
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
- vm = self._get_vm_opaque_ref(instance)
- rec = self._session.get_xenapi().VM.get_record(vm)
- return VMHelper.compile_diagnostics(self._session, rec)
+ """Return data about VM diagnostics."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ return VMHelper.compile_diagnostics(self._session, vm_rec)
def get_console_output(self, instance):
- """Return snapshot of console"""
+ """Return snapshot of console."""
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
def get_ajax_console(self, instance):
- """Return link to instance's ajax console"""
+ """Return link to instance's ajax console."""
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
- def inject_network_info(self, instance):
- """
- Generate the network info and make calls to place it into the
- xenstore and the xenstore param list
-
- """
- # TODO(tr3buchet) - remove comment in multi-nic
- # I've decided to go ahead and consider multiple IPs and networks
- # at this stage even though they aren't implemented because these will
- # be needed for multi-nic and there was no sense writing it for single
- # network/single IP and then having to turn around and re-write it
- vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
- logging.debug(_("injecting network info to xenstore for vm: |%s|"),
- vm_opaque_ref)
+ # TODO(tr3buchet) - remove this function after nova multi-nic
+ def _get_network_info(self, instance):
+ """Creates network info list for instance."""
admin_context = context.get_admin_context()
- IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
+ IPs = db.fixed_ip_get_all_by_instance(admin_context,
+ instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
+ flavor = db.instance_type_get_by_name(admin_context,
+ instance['instance_type'])
+ network_info = []
for network in networks:
network_IPs = [ip for ip in IPs if ip.network_id == network.id]
def ip_dict(ip):
- return {'netmask': network['netmask'],
- 'enabled': '1',
- 'ip': ip.address}
-
- mac_id = instance.mac_address.replace(':', '')
- location = 'vm-data/networking/%s' % mac_id
- mapping = {'label': network['label'],
- 'gateway': network['gateway'],
- 'mac': instance.mac_address,
- 'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
- self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
+ return {
+ "ip": ip.address,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict(ip6):
+ return {
+ "ip": utils.to_global_ipv6(network['cidr_v6'],
+ instance['mac_address']),
+ "netmask": network['netmask_v6'],
+ "gateway": network['gateway_v6'],
+ "enabled": "1"}
+
+ info = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
+ 'mac': instance.mac_address,
+ 'rxtx_cap': flavor['rxtx_cap'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs]}
+ if network['cidr_v6']:
+ info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
+ network_info.append((network, info))
+ return network_info
+
+ def inject_network_info(self, instance, vm_ref, network_info):
+ """
+ Generate the network info and make calls to place it into the
+ xenstore and the xenstore param list.
+ """
+ logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
+
+ # this function raises if vm_ref is not a vm_opaque_ref
+ self._session.get_xenapi().VM.get_record(vm_ref)
+
+ for (network, info) in network_info:
+ location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
+ self.write_to_param_xenstore(vm_ref, {location: info})
try:
- self.write_to_xenstore(vm_opaque_ref, location,
- mapping['location'])
+ # TODO(tr3buchet): fix function call after refactor
+ #self.write_to_xenstore(vm_ref, location, info)
+ self._make_plugin_call('xenstore.py', 'write_record', instance,
+ location, {'value': json.dumps(info)},
+ vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
- return networks
+ def create_vifs(self, vm_ref, network_info):
+ """Creates vifs for an instance."""
+ logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)
- def create_vifs(self, instance, networks=None):
- """
- Creates vifs for an instance
+ # this function raises if vm_ref is not a vm_opaque_ref
+ self._session.get_xenapi().VM.get_record(vm_ref)
- """
- vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
- logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
- if networks is None:
- networks = db.network_get_all_by_instance(admin_context,
- instance['id'])
- # TODO(tr3buchet) - remove comment in multi-nic
- # this bit here about creating the vifs will be updated
- # in multi-nic to handle multiple IPs on the same network
- # and multiple networks
- # for now it works as there is only one of each
- for network in networks:
+ for device, (network, info) in enumerate(network_info):
+ mac_address = info['mac']
bridge = network['bridge']
+ rxtx_cap = info.pop('rxtx_cap')
network_ref = \
- NetworkHelper.find_network_with_bridge(self._session, bridge)
+ NetworkHelper.find_network_with_bridge(self._session,
+ bridge)
+ VMHelper.create_vif(self._session, vm_ref, network_ref,
+ mac_address, device, rxtx_cap)
- if network_ref:
- VMHelper.create_vif(self._session, vm_opaque_ref,
- network_ref, instance.mac_address)
-
- def reset_network(self, instance):
- """
- Creates uuid arg to pass to make_agent_call and calls it.
-
- """
+ def reset_network(self, instance, vm_ref):
+ """Creates uuid arg to pass to make_agent_call and calls it."""
args = {'id': str(uuid.uuid4())}
- resp = self._make_agent_call('resetnetwork', instance, '', args)
+ # TODO(tr3buchet): fix function call after refactor
+ #resp = self._make_agent_call('resetnetwork', instance, '', args)
+ resp = self._make_plugin_call('agent', 'resetnetwork', instance, '',
+ args, vm_ref)
def list_from_xenstore(self, vm, path):
- """Runs the xenstore-ls command to get a listing of all records
+ """
+ Runs the xenstore-ls command to get a listing of all records
from 'path' downward. Returns a dict with the sub-paths as keys,
and the value stored in those paths as values. If nothing is
found at that path, returns None.
@@ -532,7 +894,8 @@ class VMOps(object):
return json.loads(ret)
def read_from_xenstore(self, vm, path):
- """Returns the value stored in the xenstore record for the given VM
+ """
+ Returns the value stored in the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the read process.
"""
@@ -548,7 +911,8 @@ class VMOps(object):
return ret
def write_to_xenstore(self, vm, path, value):
- """Writes the passed value to the xenstore record for the given VM
+ """
+ Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
@@ -556,33 +920,36 @@ class VMOps(object):
{'value': json.dumps(value)})
def clear_xenstore(self, vm, path):
- """Deletes the VM's xenstore record for the specified path.
+ """
+ Deletes the VM's xenstore record for the specified path.
If there is no such record, the request is ignored.
"""
self._make_xenstore_call('delete_record', vm, path)
- def _make_xenstore_call(self, method, vm, path, addl_args={}):
+ def _make_xenstore_call(self, method, vm, path, addl_args=None):
"""Handles calls to the xenstore xenapi plugin."""
return self._make_plugin_call('xenstore.py', method=method, vm=vm,
path=path, addl_args=addl_args)
- def _make_agent_call(self, method, vm, path, addl_args={}):
+ def _make_agent_call(self, method, vm, path, addl_args=None):
"""Abstracts out the interaction with the agent xenapi plugin."""
return self._make_plugin_call('agent', method=method, vm=vm,
path=path, addl_args=addl_args)
- def _make_plugin_call(self, plugin, method, vm, path, addl_args={}):
- """Abstracts out the process of calling a method of a xenapi plugin.
+ def _make_plugin_call(self, plugin, method, vm, path, addl_args=None,
+ vm_ref=None):
+ """
+ Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
instance_id = vm.id
- vm = self._get_vm_opaque_ref(vm)
- rec = self._session.get_xenapi().VM.get_record(vm)
- args = {'dom_id': rec['domid'], 'path': path}
- args.update(addl_args)
+ vm_ref = vm_ref or self._get_vm_opaque_ref(vm)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ args = {'dom_id': vm_rec['domid'], 'path': path}
+ args.update(addl_args or {})
try:
task = self._session.async_call_plugin(plugin, method, args)
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, e:
ret = None
err_trace = e.details[-1]
@@ -602,7 +969,8 @@ class VMOps(object):
return ret
def add_to_xenstore(self, vm, path, key, value):
- """Adds the passed key/value pair to the xenstore record for
+ """
+ Adds the passed key/value pair to the xenstore record for
the given VM at the specified location. A XenAPIPlugin.PluginError
will be raised if any error is encountered in the write process.
"""
@@ -615,7 +983,8 @@ class VMOps(object):
self.write_to_xenstore(vm, path, current)
def remove_from_xenstore(self, vm, path, key_or_keys):
- """Takes either a single key or a list of keys and removes
+ """
+ Takes either a single key or a list of keys and removes
them from the xenstore record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
@@ -642,7 +1011,8 @@ class VMOps(object):
###### names to distinguish them. (dabo)
########################################################################
def read_partial_from_param_xenstore(self, instance_or_vm, key_prefix):
- """Returns a dict of all the keys in the xenstore parameter record
+ """
+ Returns a dict of all the keys in the xenstore parameter record
for the given instance that begin with the key_prefix.
"""
data = self.read_from_param_xenstore(instance_or_vm)
@@ -653,14 +1023,15 @@ class VMOps(object):
return data
def read_from_param_xenstore(self, instance_or_vm, keys=None):
- """Returns the xenstore parameter record data for the specified VM
+ """
+ Returns the xenstore parameter record data for the specified VM
instance as a dict. Accepts an optional key or list of keys; if a
value for 'keys' is passed, the returned dict is filtered to only
return the values for those keys.
"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
data = self._session.call_xenapi_request('VM.get_xenstore_data',
- (vm, ))
+ (vm_ref,))
ret = {}
if keys is None:
keys = data.keys()
@@ -675,17 +1046,20 @@ class VMOps(object):
return ret
def add_to_param_xenstore(self, instance_or_vm, key, val):
- """Takes a key/value pair and adds it to the xenstore parameter
+ """
+ Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
- it is overwritten"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ it is overwritten.
+ """
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
self.remove_from_param_xenstore(instance_or_vm, key)
jsonval = json.dumps(val)
self._session.call_xenapi_request('VM.add_to_xenstore_data',
- (vm, key, jsonval))
+ (vm_ref, key, jsonval))
def write_to_param_xenstore(self, instance_or_vm, mapping):
- """Takes a dict and writes each key/value pair to the xenstore
+ """
+ Takes a dict and writes each key/value pair to the xenstore
parameter record for the given vm instance. Any existing data for
those keys is overwritten.
"""
@@ -693,18 +1067,19 @@ class VMOps(object):
self.add_to_param_xenstore(instance_or_vm, k, v)
def remove_from_param_xenstore(self, instance_or_vm, key_or_keys):
- """Takes either a single key or a list of keys and removes
+ """
+ Takes either a single key or a list of keys and removes
them from the xenstore parameter record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
if isinstance(key_or_keys, basestring):
keys = [key_or_keys]
else:
keys = key_or_keys
for key in keys:
self._session.call_xenapi_request('VM.remove_from_xenstore_data',
- (vm, key))
+ (vm_ref, key))
def clear_param_xenstore(self, instance_or_vm):
"""Removes all data from the xenstore parameter record for this VM."""
@@ -719,7 +1094,8 @@ def _runproc(cmd):
class SimpleDH(object):
- """This class wraps all the functionality needed to implement
+ """
+ This class wraps all the functionality needed to implement
basic Diffie-Hellman-Merkle key exchange in Python. It features
intelligent defaults for the prime and base numbers needed for the
calculation, while allowing you to supply your own. It requires that
@@ -728,7 +1104,8 @@ class SimpleDH(object):
is not available, a RuntimeError will be raised.
"""
def __init__(self, prime=None, base=None, secret=None):
- """You can specify the values for prime and base if you wish;
+ """
+ You can specify the values for prime and base if you wish;
otherwise, reasonable default values will be used.
"""
if prime is None:
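
For readers following the SimpleDH docstring above, here is a minimal, self-contained sketch of Diffie-Hellman-Merkle key agreement in pure Python. It is illustrative only: the class above shells out to an external tool via _runproc, and the toy prime below is nothing like its real defaults.

    import random

    # Toy 32-bit prime (2**32 - 5); far too small for real use.
    PRIME = 4294967291
    BASE = 5

    def make_keypair(prime=PRIME, base=BASE):
        """Return (private, public) for one side of the exchange."""
        private = random.randint(2, prime - 2)
        public = pow(base, private, prime)
        return private, public

    def shared_secret(their_public, my_private, prime=PRIME):
        """Both sides derive the same value: (g**a)**b == (g**b)**a mod p."""
        return pow(their_public, my_private, prime)

    if __name__ == '__main__':
        alice_priv, alice_pub = make_keypair()
        bob_priv, bob_pub = make_keypair()
        assert shared_secret(bob_pub, alice_priv) == \
            shared_secret(alice_pub, bob_priv)
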
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index d5ebd29d5..72284ac02 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -117,16 +117,16 @@ class VolumeHelper(HelperBase):
def introduce_vdi(cls, session, sr_ref):
"""Introduce VDI in the host"""
try:
- vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
+ vdi_refs = session.get_xenapi().SR.get_VDIs(sr_ref)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
- vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_refs[0])
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to get record'
- ' of VDI %s on') % vdis[0])
+ ' of VDI %s on') % vdi_refs[0])
else:
try:
return session.get_xenapi().VDI.introduce(
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index d89a6f995..757ecf5ad 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -83,7 +83,7 @@ class VolumeOps(object):
try:
task = self._session.call_xenapi('Async.VBD.plug',
vbd_ref)
- self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ self._session.wait_for_task(task, vol_rec['deviceNumber'])
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index fc56a4bae..99fd35c61 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -49,6 +49,12 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
+
+**Variable Naming Scheme**
+
+- suffix "_ref" for opaque references
+- suffix "_uuid" for UUIDs
+- suffix "_rec" for record objects
"""
import sys
@@ -63,6 +69,7 @@ from nova import db
from nova import utils
from nova import flags
from nova import log as logging
+from nova.virt import driver
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
@@ -100,6 +107,22 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts',
5,
'Max number of times to poll for VHD to coalesce.'
' Used only if connection_type=xenapi.')
+flags.DEFINE_bool('xenapi_inject_image',
+ True,
+ 'Specifies whether an attempt to inject network/key'
+ ' data into the disk image should be made.'
+ ' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_agent_path',
+ 'usr/sbin/xe-update-networking',
+ 'Specifies the path in which the xenapi guest agent'
+ ' should be located. If the agent is present,'
+ ' network configuration is not injected into the image.'
+ ' Used only if connection_type=xenapi'
+ ' and xenapi_inject_image=True.')
+
+flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
+ 'Base path to the storage repository')
+
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -133,10 +156,11 @@ def get_connection(_):
return XenAPIConnection(url, username, password)
-class XenAPIConnection(object):
+class XenAPIConnection(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform"""
def __init__(self, url, user, pw):
+ super(XenAPIConnection, self).__init__()
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
@@ -152,10 +176,21 @@ class XenAPIConnection(object):
"""List VM instances"""
return self._vmops.list_instances()
+ def list_instances_detail(self):
+ return self._vmops.list_instances_detail()
+
def spawn(self, instance):
"""Create VM instance"""
self._vmops.spawn(instance)
+ def revert_resize(self, instance):
+ """Reverts a resize, powering back on the instance"""
+ self._vmops.revert_resize(instance)
+
+ def finish_resize(self, instance, disk_info):
+ """Completes a resize, turning on the migrated instance"""
+ self._vmops.finish_resize(instance, disk_info)
+
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance """
self._vmops.snapshot(instance, image_id)
@@ -186,6 +221,11 @@ class XenAPIConnection(object):
"""Unpause paused VM instance"""
self._vmops.unpause(instance, callback)
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Transfers the VHD of a running instance to another host, then shuts
+ off the instance and copies over the COW disk"""
+ return self._vmops.migrate_disk_and_power_off(instance, dest)
+
def suspend(self, instance, callback):
"""suspend the specified instance"""
self._vmops.suspend(instance, callback)
@@ -194,6 +234,18 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ self._vmops.rescue(instance, callback)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ self._vmops.unrescue(instance, callback)
+
+ def poll_rescued_instances(self, timeout):
+ """Poll for rescued instances"""
+ self._vmops.poll_rescued_instances(timeout)
+
def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
@@ -218,6 +270,10 @@ class XenAPIConnection(object):
"""Return link to instance's ajax console"""
return self._vmops.get_ajax_console(instance)
+ def get_host_ip_addr(self):
+ xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ return xs_url.netloc
+
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
@@ -234,6 +290,27 @@ class XenAPIConnection(object):
'username': FLAGS.xenapi_connection_username,
'password': FLAGS.xenapi_connection_password}
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
+
+ def compare_cpu(self, xml):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ return
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only by libvirt."""
+ return
+
+ def unfilter_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -277,7 +354,7 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
- def wait_for_task(self, id, task):
+ def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
until it completes. Not re-entrant."""
done = event.Event()
@@ -304,10 +381,11 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
if status == "pending":
return
elif status == "success":
@@ -321,7 +399,9 @@ class XenAPISession(object):
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
- db.instance_action_create(context.get_admin_context(), action)
+
+ if id:
+ db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
LOG.warn(exc)
done.send_exception(*sys.exc_info())
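
The reworked wait_for_task above polls a XenAPI task until it leaves the 'pending' state and only records an instance action when an id is supplied. A simplified, standalone sketch of that polling pattern, with plain callables standing in for the XenAPI task calls (the names here are illustrative, not the driver's API):

    import time

    def wait_for_task(get_status, get_result, get_error,
                      poll_interval=0.5, timeout=60):
        """Poll a task until it completes; return its result or raise."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = get_status()
            if status == "pending":
                time.sleep(poll_interval)
                continue
            if status == "success":
                return get_result()
            raise RuntimeError("task failed: %s" % (get_error(),))
        raise RuntimeError("timed out waiting for task")
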
diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py
new file mode 100644
index 000000000..b5b00e44e
--- /dev/null
+++ b/nova/vnc/__init__.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for VNC Proxying."""
+
+from nova import flags
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vncproxy_topic', 'vncproxy',
+ 'the topic vnc proxy nodes listen on')
+flags.DEFINE_string('vncproxy_url',
+ 'http://127.0.0.1:6080',
+ 'location of vnc console proxy, \
+ in the form "http://127.0.0.1:6080"')
+flags.DEFINE_string('vncserver_host', '0.0.0.0',
+ 'the host interface on which vnc server should listen')
+flags.DEFINE_bool('vnc_enabled', True,
+ 'enable vnc related features')
diff --git a/nova/vnc/auth.py b/nova/vnc/auth.py
new file mode 100644
index 000000000..ce5e10388
--- /dev/null
+++ b/nova/vnc/auth.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Auth Components for VNC Console."""
+
+import time
+import urlparse
+import webob
+
+from webob import Request
+
+from nova import context
+from nova import flags
+from nova import log as logging
+from nova import manager
+from nova import rpc
+from nova import utils
+from nova import wsgi
+from nova import vnc
+
+
+LOG = logging.getLogger('nova.vnc-proxy')
+FLAGS = flags.FLAGS
+
+
+class VNCNovaAuthMiddleware(object):
+ """Implementation of Middleware to Handle Nova Auth."""
+
+ def __init__(self, app):
+ self.app = app
+ self.token_cache = {}
+ utils.LoopingCall(self.delete_expired_cache_items).start(1)
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ token = req.params.get('token')
+
+ if not token:
+ referrer = req.environ.get('HTTP_REFERER')
+ auth_params = urlparse.parse_qs(urlparse.urlparse(referrer).query)
+ if 'token' in auth_params:
+ token = auth_params['token'][0]
+
+ connection_info = self.get_token_info(token)
+ if not connection_info:
+ LOG.audit(_("Unauthorized Access: (%s)"), req.environ)
+ return webob.exc.HTTPForbidden(detail='Unauthorized')
+
+ if req.path == vnc.proxy.WS_ENDPOINT:
+ req.environ['vnc_host'] = connection_info['host']
+ req.environ['vnc_port'] = int(connection_info['port'])
+
+ return req.get_response(self.app)
+
+ def get_token_info(self, token):
+ if token in self.token_cache:
+ return self.token_cache[token]
+
+ rval = rpc.call(context.get_admin_context(),
+ FLAGS.vncproxy_topic,
+ {"method": "check_token", "args": {'token': token}})
+ if rval:
+ self.token_cache[token] = rval
+ return rval
+
+ def delete_expired_cache_items(self):
+ now = time.time()
+ to_delete = []
+ for k, v in self.token_cache.items():
+ if now - v['last_activity_at'] > FLAGS.vnc_token_ttl:
+ to_delete.append(k)
+
+ for k in to_delete:
+ del self.token_cache[k]
+
+
+class LoggingMiddleware(object):
+ """Middleware for basic vnc-specific request logging."""
+
+ def __init__(self, app):
+ self.app = app
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ if req.path == vnc.proxy.WS_ENDPOINT:
+ LOG.info(_("Received Websocket Request: %s"), req.url)
+ else:
+ LOG.info(_("Received Request: %s"), req.url)
+
+ return req.get_response(self.app)
+
+
+class VNCProxyAuthManager(manager.Manager):
+ """Manages token based authentication."""
+
+ def __init__(self, scheduler_driver=None, *args, **kwargs):
+ super(VNCProxyAuthManager, self).__init__(*args, **kwargs)
+ self.tokens = {}
+ utils.LoopingCall(self._delete_expired_tokens).start(1)
+
+ def authorize_vnc_console(self, context, token, host, port):
+ self.tokens[token] = {'host': host,
+ 'port': port,
+ 'last_activity_at': time.time()}
+ token_dict = self.tokens[token]
+ LOG.audit(_("Received Token: %(token)s, %(token_dict)s)"), locals())
+
+ def check_token(self, context, token):
+ token_valid = token in self.tokens
+ LOG.audit(_("Checking Token: %(token)s, %(token_valid)s)"), locals())
+ if token_valid:
+ return self.tokens[token]
+
+ def _delete_expired_tokens(self):
+ now = time.time()
+ to_delete = []
+ for k, v in self.tokens.items():
+ if now - v['last_activity_at'] > FLAGS.vnc_token_ttl:
+ to_delete.append(k)
+
+ for k in to_delete:
+ LOG.audit(_("Deleting Expired Token: %s)"), k)
+ del self.tokens[k]
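
Both the middleware's token_cache and the auth manager's tokens dict above follow the same pattern: record last_activity_at with each token and periodically purge entries older than FLAGS.vnc_token_ttl. A minimal standalone sketch of that TTL cache (the LoopingCall scheduling is omitted; ttl_seconds stands in for the flag):

    import time

    class TokenCache(object):
        """Tiny TTL cache mirroring the expiry logic used above."""

        def __init__(self, ttl_seconds=300):
            self.ttl = ttl_seconds
            self.tokens = {}

        def add(self, token, host, port):
            self.tokens[token] = {'host': host, 'port': port,
                                  'last_activity_at': time.time()}

        def check(self, token):
            return self.tokens.get(token)

        def purge_expired(self):
            now = time.time()
            expired = [k for k, v in self.tokens.items()
                       if now - v['last_activity_at'] > self.ttl]
            for key in expired:
                del self.tokens[key]
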
diff --git a/nova/vnc/proxy.py b/nova/vnc/proxy.py
new file mode 100644
index 000000000..c4603803b
--- /dev/null
+++ b/nova/vnc/proxy.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Eventlet WSGI Services to proxy VNC. No nova deps."""
+
+import base64
+import os
+
+import eventlet
+from eventlet import wsgi
+from eventlet import websocket
+
+import webob
+
+
+WS_ENDPOINT = '/data'
+
+
+class WebsocketVNCProxy(object):
+ """Class to proxy from websocket to vnc server."""
+
+ def __init__(self, wwwroot):
+ self.wwwroot = wwwroot
+ self.whitelist = {}
+ for root, dirs, files in os.walk(wwwroot):
+ hidden_dirs = []
+ for d in dirs:
+ if d.startswith('.'):
+ hidden_dirs.append(d)
+ for d in hidden_dirs:
+ dirs.remove(d)
+ for name in files:
+ if not str(name).startswith('.'):
+ filename = os.path.join(root, name)
+ self.whitelist[filename] = True
+
+ def get_whitelist(self):
+ return self.whitelist.keys()
+
+ def sock2ws(self, source, dest):
+ try:
+ while True:
+ d = source.recv(32384)
+ if d == '':
+ break
+ d = base64.b64encode(d)
+ dest.send(d)
+ except:
+ source.close()
+ dest.close()
+
+ def ws2sock(self, source, dest):
+ try:
+ while True:
+ d = source.wait()
+ if d is None:
+ break
+ d = base64.b64decode(d)
+ dest.sendall(d)
+ except:
+ source.close()
+ dest.close()
+
+ def proxy_connection(self, environ, start_response):
+ @websocket.WebSocketWSGI
+ def _handle(client):
+ server = eventlet.connect((client.environ['vnc_host'],
+ client.environ['vnc_port']))
+ t1 = eventlet.spawn(self.ws2sock, client, server)
+ t2 = eventlet.spawn(self.sock2ws, server, client)
+ t1.wait()
+ t2.wait()
+ _handle(environ, start_response)
+
+ def __call__(self, environ, start_response):
+ req = webob.Request(environ)
+ if req.path == WS_ENDPOINT:
+ return self.proxy_connection(environ, start_response)
+ else:
+ if req.path == '/':
+ fname = '/vnc_auto.html'
+ else:
+ fname = req.path
+
+ fname = (self.wwwroot + fname).replace('//', '/')
+ if not fname in self.whitelist:
+ start_response('404 Not Found',
+ [('content-type', 'text/html')])
+ return "Not Found"
+
+ base, ext = os.path.splitext(fname)
+ if ext == '.js':
+ mimetype = 'application/javascript'
+ elif ext == '.css':
+ mimetype = 'text/css'
+ elif ext in ['.svg', '.jpg', '.png', '.gif']:
+ mimetype = 'image'
+ else:
+ mimetype = 'text/html'
+
+ start_response('200 OK', [('content-type', mimetype)])
+ return open(os.path.join(fname)).read()
+
+
+class DebugMiddleware(object):
+ """Debug middleware. Skip auth, get vnc connect info from query string."""
+
+ def __init__(self, app):
+ self.app = app
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ if req.path == WS_ENDPOINT:
+ req.environ['vnc_host'] = req.params.get('host')
+ req.environ['vnc_port'] = int(req.params.get('port'))
+ return req.get_response(self.app)
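
WebsocketVNCProxy builds its whitelist of servable files by walking wwwroot and skipping hidden files and directories. A standalone sketch of just that walk:

    import os

    def build_whitelist(wwwroot):
        """Collect non-hidden files under wwwroot, pruning hidden dirs."""
        whitelist = {}
        for root, dirs, files in os.walk(wwwroot):
            # Prune hidden directories in place so os.walk skips them.
            dirs[:] = [d for d in dirs if not d.startswith('.')]
            for name in files:
                if not name.startswith('.'):
                    whitelist[os.path.join(root, name)] = True
        return sorted(whitelist.keys())
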
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 2f4494845..4b4bb9dc5 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -82,7 +82,8 @@ class API(base.Base):
self.db.volume_update(context, volume_id, fields)
def get(self, context, volume_id):
- return self.db.volume_get(context, volume_id)
+ rv = self.db.volume_get(context, volume_id)
+ return dict(rv.iteritems())
def get_all(self, context):
if context.is_admin:
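
The change to API.get above copies the database row into a plain dict before returning it, so callers receive detached data rather than a live ORM object. A tiny sketch of that idea with a generic row-like object (row_to_dict is a hypothetical helper, not part of the API):

    def row_to_dict(row):
        """Snapshot an iterable-of-pairs row object into a plain dict."""
        items = row.iteritems() if hasattr(row, 'iteritems') else row.items()
        return dict(items)
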
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index e3744c790..850893914 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -65,14 +65,14 @@ class VolumeDriver(object):
self._execute = execute
self._sync_exec = sync_exec
- def _try_execute(self, command):
+ def _try_execute(self, *command):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
- self._execute(command)
+ self._execute(*command)
return True
except exception.ProcessExecutionError:
tries = tries + 1
@@ -84,7 +84,7 @@ class VolumeDriver(object):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
- out, err = self._execute("sudo vgs --noheadings -o name")
+ out, err = self._execute('sudo', 'vgs', '--noheadings', '-o', 'name')
volume_groups = out.split()
if not FLAGS.volume_group in volume_groups:
raise exception.Error(_("volume group %s doesn't exist")
@@ -97,22 +97,22 @@ class VolumeDriver(object):
sizestr = '100M'
else:
sizestr = '%sG' % volume['size']
- self._try_execute("sudo lvcreate -L %s -n %s %s" %
- (sizestr,
+ self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n',
volume['name'],
- FLAGS.volume_group))
+ FLAGS.volume_group)
def delete_volume(self, volume):
"""Deletes a logical volume."""
try:
- self._try_execute("sudo lvdisplay %s/%s" %
+ self._try_execute('sudo', 'lvdisplay',
+ '%s/%s' %
(FLAGS.volume_group,
volume['name']))
except Exception as e:
# If the volume isn't present, then don't attempt to delete
return True
- self._try_execute("sudo lvremove -f %s/%s" %
+ self._try_execute('sudo', 'lvremove', '-f', "%s/%s" %
(FLAGS.volume_group,
volume['name']))
@@ -135,7 +135,7 @@ class VolumeDriver(object):
"""Removes an export for a logical volume."""
raise NotImplementedError()
- def discover_volume(self, volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
raise NotImplementedError()
@@ -143,6 +143,10 @@ class VolumeDriver(object):
"""Undiscover volume on a remote host."""
raise NotImplementedError()
+ def check_for_export(self, context, volume_id):
+ """Make sure volume is exported."""
+ raise NotImplementedError()
+
class AOEDriver(VolumeDriver):
"""Implements AOE specific volume commands."""
@@ -168,12 +172,13 @@ class AOEDriver(VolumeDriver):
blade_id) = self.db.volume_allocate_shelf_and_blade(context,
volume['id'])
self._try_execute(
- "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (shelf_id,
+ 'sudo', 'vblade-persist', 'setup',
+ shelf_id,
blade_id,
FLAGS.aoe_eth_dev,
- FLAGS.volume_group,
- volume['name']))
+ "/dev/%s/%s" %
+ (FLAGS.volume_group,
+ volume['name']))
# NOTE(vish): The standard _try_execute does not work here
# because these methods throw errors if other
# volumes on this host are in the process of
@@ -182,9 +187,9 @@ class AOEDriver(VolumeDriver):
# just wait a bit for the current volume to
# be ready and ignore any errors.
time.sleep(2)
- self._execute("sudo vblade-persist auto all",
+ self._execute('sudo', 'vblade-persist', 'auto', 'all',
check_exit_code=False)
- self._execute("sudo vblade-persist start all",
+ self._execute('sudo', 'vblade-persist', 'start', 'all',
check_exit_code=False)
def remove_export(self, context, volume):
@@ -192,20 +197,50 @@ class AOEDriver(VolumeDriver):
(shelf_id,
blade_id) = self.db.volume_get_shelf_and_blade(context,
volume['id'])
- self._try_execute("sudo vblade-persist stop %s %s" %
- (shelf_id, blade_id))
- self._try_execute("sudo vblade-persist destroy %s %s" %
- (shelf_id, blade_id))
+ self._try_execute('sudo', 'vblade-persist', 'stop',
+ shelf_id, blade_id)
+ self._try_execute('sudo', 'vblade-persist', 'destroy',
+ shelf_id, blade_id)
- def discover_volume(self, _volume):
+ def discover_volume(self, context, _volume):
"""Discover volume on a remote host."""
- self._execute("sudo aoe-discover")
- self._execute("sudo aoe-stat", check_exit_code=False)
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ _volume['id'])
+ self._execute('sudo', 'aoe-discover')
+ out, err = self._execute('sudo', 'aoe-stat', check_exit_code=False)
+ device_path = 'e%(shelf_id)d.%(blade_id)d' % locals()
+ if out.find(device_path) >= 0:
+ return "/dev/etherd/%s" % device_path
+ else:
+ return
def undiscover_volume(self, _volume):
"""Undiscover volume on a remote host."""
pass
+ def check_for_export(self, context, volume_id):
+ """Make sure volume is exported."""
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ volume_id)
+ cmd = ('sudo', 'vblade-persist', 'ls', '--no-header')
+ out, _err = self._execute(*cmd)
+ exported = False
+ for line in out.split('\n'):
+ param = line.split(' ')
+ if len(param) == 6 and param[0] == str(shelf_id) \
+ and param[1] == str(blade_id) and param[-1] == "run":
+ exported = True
+ break
+ if not exported:
+ # Instance will be terminated in this case.
+ desc = _("Cannot confirm exported volume id:%(volume_id)s. "
+ "vblade process for e%(shelf_id)s.%(blade_id)s "
+ "isn't running.") % locals()
+ raise exception.ProcessExecutionError(out, _err, cmd=cmd,
+ description=desc)
+
class FakeAOEDriver(AOEDriver):
"""Logs calls instead of executing."""
@@ -252,13 +287,16 @@ class ISCSIDriver(VolumeDriver):
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
- self._sync_exec("sudo ietadm --op new "
- "--tid=%s --params Name=%s" %
- (iscsi_target, iscsi_name),
+ self._sync_exec('sudo', 'ietadm', '--op', 'new',
+ "--tid=%s" % iscsi_target,
+ '--params',
+ "Name=%s" % iscsi_name,
check_exit_code=False)
- self._sync_exec("sudo ietadm --op new --tid=%s "
- "--lun=0 --params Path=%s,Type=fileio" %
- (iscsi_target, volume_path),
+ self._sync_exec('sudo', 'ietadm', '--op', 'new',
+ "--tid=%s" % iscsi_target,
+ '--lun=0',
+ '--params',
+ "Path=%s,Type=fileio" % volume_path,
check_exit_code=False)
def _ensure_iscsi_targets(self, context, host):
@@ -279,12 +317,13 @@ class ISCSIDriver(VolumeDriver):
volume['host'])
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
- self._execute("sudo ietadm --op new "
- "--tid=%s --params Name=%s" %
- (iscsi_target, iscsi_name))
- self._execute("sudo ietadm --op new --tid=%s "
- "--lun=0 --params Path=%s,Type=fileio" %
- (iscsi_target, volume_path))
+ self._execute('sudo', 'ietadm', '--op', 'new',
+ '--tid=%s' % iscsi_target,
+ '--params', 'Name=%s' % iscsi_name)
+ self._execute('sudo', 'ietadm', '--op', 'new',
+ '--tid=%s' % iscsi_target,
+ '--lun=0', '--params',
+ 'Path=%s,Type=fileio' % volume_path)
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
@@ -299,16 +338,18 @@ class ISCSIDriver(VolumeDriver):
try:
# ietadm show will exit with an error
# this export has already been removed
- self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
+ self._execute('sudo', 'ietadm', '--op', 'show',
+ '--tid=%s' % iscsi_target)
except Exception as e:
LOG.info(_("Skipping remove_export. No iscsi_target " +
"is presently exported for volume: %d"), volume['id'])
return
- self._execute("sudo ietadm --op delete --tid=%s "
- "--lun=0" % iscsi_target)
- self._execute("sudo ietadm --op delete --tid=%s" %
- iscsi_target)
+ self._execute('sudo', 'ietadm', '--op', 'delete',
+ '--tid=%s' % iscsi_target,
+ '--lun=0')
+ self._execute('sudo', 'ietadm', '--op', 'delete',
+ '--tid=%s' % iscsi_target)
def _do_iscsi_discovery(self, volume):
#TODO(justinsb): Deprecate discovery and use stored info
@@ -317,8 +358,8 @@ class ISCSIDriver(VolumeDriver):
volume_name = volume['name']
- (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" % (volume['host']))
+ (out, _err) = self._execute('sudo', 'iscsiadm', '-m', 'discovery',
+ '-t', 'sendtargets', '-p', volume['host'])
for target in out.splitlines():
if FLAGS.iscsi_ip_prefix in target and volume_name in target:
return target
@@ -381,26 +422,25 @@ class ISCSIDriver(VolumeDriver):
return properties
def _run_iscsiadm(self, iscsi_properties, iscsi_command):
- command = ("sudo iscsiadm -m node -T %s -p %s %s" %
- (iscsi_properties['target_iqn'],
- iscsi_properties['target_portal'],
- iscsi_command))
- (out, err) = self._execute(command)
+ (out, err) = self._execute('sudo', 'iscsiadm', '-m', 'node', '-T',
+ iscsi_properties['target_iqn'],
+ '-p', iscsi_properties['target_portal'],
+ iscsi_command)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
- iscsi_command = ("--op update -n %s -v %s" %
- (property_key, property_value))
+ iscsi_command = ('--op', 'update', '-n', property_key,
+ '-v', property_value)
return self._run_iscsiadm(iscsi_properties, iscsi_command)
- def discover_volume(self, volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
iscsi_properties = self._get_iscsi_properties(volume)
if not iscsi_properties['target_discovered']:
- self._run_iscsiadm(iscsi_properties, "--op new")
+ self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
if iscsi_properties.get('auth_method'):
self._iscsiadm_update(iscsi_properties,
@@ -452,7 +492,22 @@ class ISCSIDriver(VolumeDriver):
iscsi_properties = self._get_iscsi_properties(volume)
self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
self._run_iscsiadm(iscsi_properties, "--logout")
- self._run_iscsiadm(iscsi_properties, "--op delete")
+ self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
+
+ def check_for_export(self, context, volume_id):
+ """Make sure volume is exported."""
+
+ tid = self.db.volume_get_iscsi_target_num(context, volume_id)
+ try:
+ self._execute('sudo', 'ietadm', '--op', 'show',
+ '--tid=%(tid)d' % locals())
+ except exception.ProcessExecutionError, e:
+ # Instances remount read-only in this case.
+ # /etc/init.d/iscsitarget restart and rebooting nova-volume
+ # is better since ensure_export() works at boot time.
+ logging.error(_("Cannot confirm exported volume "
+ "id:%(volume_id)s.") % locals())
+ raise
class FakeISCSIDriver(ISCSIDriver):
@@ -478,7 +533,7 @@ class RBDDriver(VolumeDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
- (stdout, stderr) = self._execute("rados lspools")
+ (stdout, stderr) = self._execute('rados', 'lspools')
pools = stdout.split("\n")
if not FLAGS.rbd_pool in pools:
raise exception.Error(_("rbd has no pool %s") %
@@ -490,16 +545,13 @@ class RBDDriver(VolumeDriver):
size = 100
else:
size = int(volume['size']) * 1024
- self._try_execute("rbd --pool %s --size %d create %s" %
- (FLAGS.rbd_pool,
- size,
- volume['name']))
+ self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+ '--size', size, 'create', volume['name'])
def delete_volume(self, volume):
"""Deletes a logical volume."""
- self._try_execute("rbd --pool %s rm %s" %
- (FLAGS.rbd_pool,
- volume['name']))
+ self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+ 'rm', volume['name'])
def local_path(self, volume):
"""Returns the path of the rbd volume."""
@@ -521,6 +573,8 @@ class RBDDriver(VolumeDriver):
def discover_volume(self, volume):
"""Discover volume on a remote host"""
+ # NOTE(justinsb): This is messed up... discover_volume takes 3 args
+ # but then that would break local_path
return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])
def undiscover_volume(self, volume):
@@ -534,7 +588,7 @@ class SheepdogDriver(VolumeDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
try:
- (out, err) = self._execute("collie cluster info")
+ (out, err) = self._execute('collie', 'cluster', 'info')
if not out.startswith('running'):
raise exception.Error(_("Sheepdog is not working: %s") % out)
except exception.ProcessExecutionError:
@@ -546,12 +600,13 @@ class SheepdogDriver(VolumeDriver):
sizestr = '100M'
else:
sizestr = '%sG' % volume['size']
- self._try_execute("qemu-img create sheepdog:%s %s" %
- (volume['name'], sizestr))
+ self._try_execute('qemu-img', 'create',
+ "sheepdog:%s" % volume['name'],
+ sizestr)
def delete_volume(self, volume):
"""Deletes a logical volume"""
- self._try_execute("collie vdi delete %s" % volume['name'])
+ self._try_execute('collie', 'vdi', 'delete', volume['name'])
def local_path(self, volume):
return "sheepdog:%s" % volume['name']
@@ -568,10 +623,81 @@ class SheepdogDriver(VolumeDriver):
"""Removes an export for a logical volume"""
pass
- def discover_volume(self, volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host"""
return "sheepdog:%s" % volume['name']
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host"""
pass
+
+
+class LoggingVolumeDriver(VolumeDriver):
+ """Logs and records calls, for unit tests."""
+
+ def check_for_setup_error(self):
+ pass
+
+ def create_volume(self, volume):
+ self.log_action('create_volume', volume)
+
+ def delete_volume(self, volume):
+ self.log_action('delete_volume', volume)
+
+ def local_path(self, volume):
+ print "local_path not implemented"
+ raise NotImplementedError()
+
+ def ensure_export(self, context, volume):
+ self.log_action('ensure_export', volume)
+
+ def create_export(self, context, volume):
+ self.log_action('create_export', volume)
+
+ def remove_export(self, context, volume):
+ self.log_action('remove_export', volume)
+
+ def discover_volume(self, context, volume):
+ self.log_action('discover_volume', volume)
+
+ def undiscover_volume(self, volume):
+ self.log_action('undiscover_volume', volume)
+
+ def check_for_export(self, context, volume_id):
+ self.log_action('check_for_export', volume_id)
+
+ _LOGS = []
+
+ @staticmethod
+ def clear_logs():
+ LoggingVolumeDriver._LOGS = []
+
+ @staticmethod
+ def log_action(action, parameters):
+ """Logs the command."""
+ LOG.debug(_("LoggingVolumeDriver: %s") % (action))
+ log_dictionary = {}
+ if parameters:
+ log_dictionary = dict(parameters)
+ log_dictionary['action'] = action
+ LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary))
+ LoggingVolumeDriver._LOGS.append(log_dictionary)
+
+ @staticmethod
+ def all_logs():
+ return LoggingVolumeDriver._LOGS
+
+ @staticmethod
+ def logs_like(action, **kwargs):
+ matches = []
+ for entry in LoggingVolumeDriver._LOGS:
+ if entry['action'] != action:
+ continue
+ match = True
+ for k, v in kwargs.iteritems():
+ if entry.get(k) != v:
+ match = False
+ break
+ if match:
+ matches.append(entry)
+ return matches
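
Most of the churn in this driver converts shell-string _execute calls into argv-style calls, and _try_execute retries commands that can fail transiently. A hedged, standalone sketch of that retry pattern using subprocess directly (the retry count and backoff are illustrative, not the driver's values):

    import subprocess
    import time

    def try_execute(*cmd, **kwargs):
        """Run an argv-style command, retrying a few times on failure."""
        max_tries = kwargs.pop('max_tries', 3)
        tries = 0
        while True:
            try:
                subprocess.check_call(list(cmd))
                return True
            except subprocess.CalledProcessError:
                tries += 1
                if tries >= max_tries:
                    raise
                time.sleep(tries ** 2)

    # Usage, argv form with no shell interpolation:
    # try_execute('lvdisplay', '%s/%s' % ('nova-volumes', 'volume-00000001'))
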
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 3e8bc16b3..2178389ce 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -64,14 +64,15 @@ flags.DEFINE_boolean('use_local_volumes', True,
'if True, will not discover local volumes')
-class VolumeManager(manager.Manager):
+class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
def __init__(self, volume_driver=None, *args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
if not volume_driver:
volume_driver = FLAGS.volume_driver
self.driver = utils.import_object(volume_driver)
- super(VolumeManager, self).__init__(*args, **kwargs)
+ super(VolumeManager, self).__init__(service_name='volume',
+ *args, **kwargs)
# NOTE(vish): Implementation specific db handling is done
# by the driver.
self.driver.db = self.db
@@ -160,7 +161,7 @@ class VolumeManager(manager.Manager):
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
path = self.driver.local_path(volume_ref)
else:
- path = self.driver.discover_volume(volume_ref)
+ path = self.driver.discover_volume(context, volume_ref)
return path
def remove_compute_volume(self, context, volume_id):
@@ -171,3 +172,9 @@ class VolumeManager(manager.Manager):
return True
else:
self.driver.undiscover_volume(volume_ref)
+
+ def check_for_export(self, context, instance_id):
+ """Make sure whether volume is exported."""
+ instance_ref = self.db.instance_get(context, instance_id)
+ for volume in instance_ref['volumes']:
+ self.driver.check_for_export(context, volume['id'])
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 1eb66d067..ba0819466 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -36,6 +36,7 @@ import webob.exc
from paste import deploy
+from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@@ -82,6 +83,35 @@ class Server(object):
log=WritableLogger(logger))
+class Request(webob.Request):
+
+ def best_match_content_type(self):
+ """
+ Determine the most acceptable content-type based on the
+ query extension then the Accept header
+ """
+
+ parts = self.path.rsplit(".", 1)
+
+ if len(parts) > 1:
+ format = parts[1]
+ if format in ["json", "xml"]:
+ return "application/{0}".format(parts[1])
+
+ ctypes = ["application/json", "application/xml"]
+ bm = self.accept.best_match(ctypes)
+
+ return bm or "application/json"
+
+ def get_content_type(self):
+ try:
+ ct = self.headers["Content-Type"]
+ assert ct in ("application/xml", "application/json")
+ return ct
+ except Exception:
+ raise webob.exc.HTTPBadRequest("Invalid content type")
+
+
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@@ -113,7 +143,7 @@ class Application(object):
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
@@ -199,7 +229,7 @@ class Middleware(Application):
"""Do whatever you'd like to the response."""
return response
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
response = self.process_request(req)
if response:
@@ -212,7 +242,7 @@ class Debug(Middleware):
"""Helper class that can be inserted into any WSGI application chain
to get information about the request and response."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
print ("*" * 40) + " REQUEST ENVIRON"
for key, value in req.environ.items():
@@ -276,7 +306,7 @@ class Router(object):
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""
Route the incoming request to a controller based on self.map.
@@ -285,7 +315,7 @@ class Router(object):
return self._router
@staticmethod
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""
Called by self._router after matching the incoming request to a route
@@ -304,11 +334,11 @@ class Controller(object):
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon itself. All action methods
must, in addition to their normal parameters, accept a 'req' argument
- which is the incoming webob.Request. They raise a webob.exc exception,
+ which is the incoming wsgi.Request. They raise a webob.exc exception,
or return a dict which will be serialized by requested content type.
"""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""
Call the method specified in req.environ by RoutesMiddleware.
@@ -318,32 +348,45 @@ class Controller(object):
method = getattr(self, action)
del arg_dict['controller']
del arg_dict['action']
+ if 'format' in arg_dict:
+ del arg_dict['format']
arg_dict['req'] = req
result = method(**arg_dict)
+
if type(result) is dict:
- return self._serialize(result, req)
+ content_type = req.best_match_content_type()
+ body = self._serialize(result, content_type)
+
+ response = webob.Response()
+ response.headers["Content-Type"] = content_type
+ response.body = body
+ return response
+
else:
return result
- def _serialize(self, data, request):
+ def _serialize(self, data, content_type):
"""
- Serialize the given dict to the response type requested in request.
+ Serialize the given dict to the provided content_type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), "_serialization_metadata", {})
- serializer = Serializer(request.environ, _metadata)
- return serializer.to_content_type(data)
+ serializer = Serializer(_metadata)
+ try:
+ return serializer.serialize(data, content_type)
+ except exception.InvalidContentType:
+ raise webob.exc.HTTPNotAcceptable()
- def _deserialize(self, data, request):
+ def _deserialize(self, data, content_type):
"""
- Deserialize the request body to the response type requested in request.
+ Deserialize the request body to the specified content type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), "_serialization_metadata", {})
- serializer = Serializer(request.environ, _metadata)
- return serializer.deserialize(data)
+ serializer = Serializer(_metadata)
+ return serializer.deserialize(data, content_type)
class Serializer(object):
@@ -351,50 +394,53 @@ class Serializer(object):
Serializes and deserializes dictionaries to certain MIME types.
"""
- def __init__(self, environ, metadata=None):
+ def __init__(self, metadata=None):
"""
Create a serializer based on the given WSGI environment.
'metadata' is an optional dict mapping MIME types to information
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
- req = webob.Request.blank('', environ)
- suffix = req.path_info.split('.')[-1].lower()
- if suffix == 'json':
- self.handler = self._to_json
- elif suffix == 'xml':
- self.handler = self._to_xml
- elif 'application/json' in req.accept:
- self.handler = self._to_json
- elif 'application/xml' in req.accept:
- self.handler = self._to_xml
- else:
- # This is the default
- self.handler = self._to_json
- def to_content_type(self, data):
- """
- Serialize a dictionary into a string.
+ def _get_serialize_handler(self, content_type):
+ handlers = {
+ "application/json": self._to_json,
+ "application/xml": self._to_xml,
+ }
- The format of the string will be decided based on the Content Type
- requested in self.environ: by Accept: header, or by URL suffix.
+ try:
+ return handlers[content_type]
+ except Exception:
+ raise exception.InvalidContentType()
+
+ def serialize(self, data, content_type):
+ """
+ Serialize a dictionary into a string of the specified content type.
"""
- return self.handler(data)
+ return self._get_serialize_handler(content_type)(data)
- def deserialize(self, datastring):
+ def deserialize(self, datastring, content_type):
"""
Deserialize a string to a dictionary.
The string must be in the format of a supported MIME type.
"""
- datastring = datastring.strip()
+ return self.get_deserialize_handler(content_type)(datastring)
+
+ def get_deserialize_handler(self, content_type):
+ handlers = {
+ "application/json": self._from_json,
+ "application/xml": self._from_xml,
+ }
+
try:
- is_xml = (datastring[0] == '<')
- if not is_xml:
- return utils.loads(datastring)
- return self._from_xml(datastring)
- except:
- return None
+ return handlers[content_type]
+ except Exception:
+ raise exception.InvalidContentType(_("Invalid content type %s"
+ % content_type))
+
+ def _from_json(self, datastring):
+ return utils.loads(datastring)
def _from_xml(self, datastring):
xmldata = self.metadata.get('application/xml', {})
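
The reworked Serializer dispatches on an explicit content type instead of sniffing the request, and raises InvalidContentType for anything it cannot handle. A minimal sketch of that dispatch, JSON-only and without the metadata hooks:

    import json

    class InvalidContentType(Exception):
        pass

    class MiniSerializer(object):
        """Dispatch (de)serialization on an explicit MIME type."""

        def serialize(self, data, content_type):
            handlers = {'application/json': json.dumps}
            try:
                return handlers[content_type](data)
            except KeyError:
                raise InvalidContentType(content_type)

        def deserialize(self, datastring, content_type):
            handlers = {'application/json': json.loads}
            try:
                return handlers[content_type](datastring)
            except KeyError:
                raise InvalidContentType(content_type)
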
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
index d60816ce7..48122e6d6 100755
--- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
@@ -30,13 +30,14 @@ import simplejson as json
def main(dom_id, command, only_this_vif=None):
- xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \
- % dom_id, True)
+ xsls = execute('/usr/bin/xenstore-ls',
+ '/local/domain/%s/vm-data/networking' % dom_id, return_stdout=True)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
- xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s"
- xsread = execute(xsr % (dom_id, mac), True)
+ xsread = execute('/usr/bin/xenstore-read',
+ '/local/domain/%s/vm-data/networking/%s' %
+ (dom_id, mac), return_stdout=True)
data = json.loads(xsread)
for ip in data['ips']:
if data["label"] == "public":
@@ -51,9 +52,10 @@ def main(dom_id, command, only_this_vif=None):
apply_iptables_rules(command, params)
-def execute(command, return_stdout=False):
+def execute(*command, **kwargs):
+ return_stdout = kwargs.pop('return_stdout', False)
devnull = open(os.devnull, 'w')
- proc = subprocess.Popen(command, shell=True, close_fds=True,
+ command = map(str, command)
+ proc = subprocess.Popen(command, close_fds=True,
stdout=subprocess.PIPE, stderr=devnull)
devnull.close()
if return_stdout:
@@ -67,45 +69,68 @@ def execute(command, return_stdout=False):
def apply_iptables_rules(command, params):
- iptables = lambda rule: execute("/sbin/iptables %s" % rule)
+ iptables = lambda *rule: execute('/sbin/iptables', *rule)
- iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
- -j ACCEPT" % params)
+ iptables('-D', 'FORWARD', '-m', 'physdev',
+ '--physdev-in', params['VIF'],
+ '-s', params['IP'],
+ '-j', 'ACCEPT')
if command == 'online':
- iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
- -j ACCEPT" % params)
+ iptables('-A', 'FORWARD', '-m', 'physdev',
+ '--physdev-in', params['VIF'],
+ '-s', params['IP'],
+ '-j', 'ACCEPT')
def apply_arptables_rules(command, params):
- arptables = lambda rule: execute("/sbin/arptables %s" % rule)
-
- arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \
- --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
- arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \
- --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ arptables = lambda *rule: execute('/sbin/arptables', *rule)
+
+ arptables('-D', 'FORWARD', '--opcode', 'Request',
+ '--in-interface', params['VIF'],
+ '--source-ip', params['IP'],
+ '--source-mac', params['MAC'],
+ '-j', 'ACCEPT')
+ arptables('-D', 'FORWARD', '--opcode', 'Reply',
+ '--in-interface', params['VIF'],
+ '--source-ip', params['IP'],
+ '--source-mac', params['MAC'],
+ '-j', 'ACCEPT')
if command == 'online':
- arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
- --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
- arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
- --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ arptables('-A', 'FORWARD', '--opcode', 'Request',
+ '--in-interface', params['VIF'],
+ '--source-ip', params['IP'],
+ '--source-mac', params['MAC'],
+ '-j', 'ACCEPT')
+ arptables('-A', 'FORWARD', '--opcode', 'Reply',
+ '--in-interface', params['VIF'],
+ '--source-ip', params['IP'],
+ '--source-mac', params['MAC'],
+ '-j', 'ACCEPT')
def apply_ebtables_rules(command, params):
- ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
-
- ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
- params)
- ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
- params)
+ ebtables = lambda *rule: execute("/sbin/ebtables", *rule)
+
+ ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
+ '--arp-ip-dst', params['IP'],
+ '-j', 'ACCEPT')
+ ebtables('-D', 'FORWARD', '-p', '0800', '-o',
+ params['VIF'], '--ip-dst', params['IP'],
+ '-j', 'ACCEPT')
if command == 'online':
- ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
- -j ACCEPT" % params)
- ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
- -j ACCEPT" % params)
-
- ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+ ebtables('-A', 'FORWARD', '-p', '0806',
+ '-o', params['VIF'],
+ '--arp-ip-dst', params['IP'],
+ '-j', 'ACCEPT')
+ ebtables('-A', 'FORWARD', '-p', '0800',
+ '-o', params['VIF'],
+ '--ip-dst', params['IP'],
+ '-j', 'ACCEPT')
+
+ ebtables('-D', 'FORWARD', '-s', '!', params['MAC'],
+ '-i', params['VIF'], '-j', 'DROP')
if command == 'online':
- ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+ ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'],
+ '-i', params['VIF'], '-j', 'DROP')
if __name__ == "__main__":
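
The rewritten helpers above pass each iptables/arptables/ebtables argument separately instead of interpolating a shell string. A small sketch that only builds the argv list (so it runs without root), mirroring the physdev FORWARD accept rule used above:

    def forward_accept_rule(action, vif, ip):
        """Build argv for the physdev FORWARD accept rule ('-A' or '-D')."""
        return ['/sbin/iptables', action, 'FORWARD',
                '-m', 'physdev', '--physdev-in', vif,
                '-s', ip, '-j', 'ACCEPT']

    if __name__ == '__main__':
        # This list would be handed to subprocess.Popen(argv, close_fds=True).
        print(forward_accept_rule('-A', 'vif1.0', '10.0.0.3'))
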
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 94eaabe73..5496a6bd5 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -22,6 +22,8 @@
# XenAPI plugin for reading/writing information to xenstore
#
+import base64
+import commands
try:
import json
except ImportError:
@@ -66,7 +68,7 @@ def key_init(self, arg_dict):
try:
resp = _wait_for_agent(self, request_id, arg_dict)
except TimeoutError, e:
- raise PluginError("%s" % e)
+ raise PluginError(e)
return resp
@@ -87,7 +89,7 @@ def password(self, arg_dict):
try:
resp = _wait_for_agent(self, request_id, arg_dict)
except TimeoutError, e:
- raise PluginError("%s" % e)
+ raise PluginError(e)
return resp
@@ -102,6 +104,75 @@ def resetnetwork(self, arg_dict):
xenstore.write_record(self, arg_dict)
+@jsonify
+def inject_file(self, arg_dict):
+ """Expects a file path and the contents of the file to be written. Both
+ should be base64-encoded in order to eliminate errors as they are passed
+ through the stack. Writes that information to xenstore for the agent,
+ which will decode the file and intended path, and create it on the
+ instance. The original agent munged both of these into a single entry;
+ the new agent keeps them separate. We will need to test for the new agent,
+ and write the xenstore records to match the agent version. We will also
+ need to test to determine if the file injection method on the agent has
+ been disabled, and raise a NotImplemented error if that is the case.
+ """
+ b64_path = arg_dict["b64_path"]
+ b64_file = arg_dict["b64_file"]
+ request_id = arg_dict["id"]
+ if _agent_has_method(self, "file_inject"):
+ # New version of the agent. Agent should receive a 'value'
+ # key whose value is a dictionary containing 'b64_path' and
+ # 'b64_file'. See old version below.
+ arg_dict["value"] = json.dumps({"name": "file_inject",
+ "value": {"b64_path": b64_path, "b64_file": b64_file}})
+ elif _agent_has_method(self, "injectfile"):
+ # Old agent requires file path and file contents to be
+ # combined into one base64 value.
+ raw_path = base64.b64decode(b64_path)
+ raw_file = base64.b64decode(b64_file)
+ new_b64 = base64.b64encode("%s,%s" % (raw_path, raw_file))
+ arg_dict["value"] = json.dumps({"name": "injectfile",
+ "value": new_b64})
+ else:
+ # Either the methods don't exist in the agent, or they
+ # have been disabled.
+ raise NotImplementedError(_("NOT IMPLEMENTED: Agent does not"
+ " support file injection."))
+ arg_dict["path"] = "data/host/%s" % request_id
+ xenstore.write_record(self, arg_dict)
+ try:
+ resp = _wait_for_agent(self, request_id, arg_dict)
+ except TimeoutError, e:
+ raise PluginError(e)
+ return resp
+
+
+def _agent_has_method(self, method):
+ """Check that the agent has a particular method by checking its
+ features. Cache the features so we don't have to query the agent
+ every time we need to check.
+ """
+ try:
+ self._agent_methods
+ except AttributeError:
+ self._agent_methods = []
+ if not self._agent_methods:
+ # Haven't been defined
+ tmp_id = commands.getoutput("uuidgen")
+ dct = {}
+ dct["value"] = json.dumps({"name": "features", "value": ""})
+ dct["path"] = "data/host/%s" % tmp_id
+ xenstore.write_record(self, dct)
+ try:
+ resp = _wait_for_agent(self, tmp_id, dct)
+ except TimeoutError, e:
+ raise PluginError(e)
+ response = json.loads(resp)
+ # The agent returns a comma-separated list of methods.
+ self._agent_methods = response.split(",")
+ return method in self._agent_methods
+
+
def _wait_for_agent(self, request_id, arg_dict):
"""Periodically checks xenstore for a response from the agent.
The request is always written to 'data/host/{id}', and
@@ -119,9 +190,8 @@ def _wait_for_agent(self, request_id, arg_dict):
# First, delete the request record
arg_dict["path"] = "data/host/%s" % request_id
xenstore.delete_record(self, arg_dict)
- raise TimeoutError(
- "TIMEOUT: No response from agent within %s seconds." %
- AGENT_TIMEOUT)
+ raise TimeoutError(_("TIMEOUT: No response from agent within"
+ " %s seconds.") % AGENT_TIMEOUT)
ret = xenstore.read_record(self, arg_dict)
# Note: the response for None with be a string that includes
# double quotes.
@@ -136,4 +206,5 @@ if __name__ == "__main__":
XenAPIPlugin.dispatch(
{"key_init": key_init,
"password": password,
- "resetnetwork": resetnetwork})
+ "resetnetwork": resetnetwork,
+ "inject_file": inject_file})
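
inject_file above receives a base64-encoded path and base64-encoded contents and, for the old agent, must combine them into a single comma-joined base64 value. A standalone sketch of that packing step (pack_inject_file is a hypothetical helper name):

    import base64
    import json

    def pack_inject_file(b64_path, b64_file, new_agent=True):
        """Build the xenstore 'value' payload for a file injection request."""
        if new_agent:
            # New agent: path and contents stay separate.
            return json.dumps({'name': 'file_inject',
                               'value': {'b64_path': b64_path,
                                         'b64_file': b64_file}})
        # Old agent: decode both, join with a comma, re-encode as one value.
        raw_path = base64.b64decode(b64_path)
        raw_file = base64.b64decode(b64_file)
        combined = base64.b64encode(raw_path + b',' + raw_file).decode('ascii')
        return json.dumps({'name': 'injectfile', 'value': combined})
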
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 61b947c25..0a45f3873 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -21,17 +21,14 @@
# XenAPI plugin for managing glance images
#
-import base64
-import errno
-import hmac
import httplib
import os
import os.path
import pickle
-import sha
+import shlex
+import shutil
import subprocess
-import time
-import urlparse
+import tempfile
import XenAPIPlugin
@@ -41,30 +38,6 @@ configure_logging('glance')
CHUNK_SIZE = 8192
KERNEL_DIR = '/boot/guest'
-FILE_SR_PATH = '/var/run/sr-mount'
-
-
-def remove_kernel_ramdisk(session, args):
- """Removes kernel and/or ramdisk from dom0's file system"""
- kernel_file = exists(args, 'kernel-file')
- ramdisk_file = exists(args, 'ramdisk-file')
- if kernel_file:
- os.remove(kernel_file)
- if ramdisk_file:
- os.remove(ramdisk_file)
- return "ok"
-
-
-def copy_kernel_vdi(session, args):
- vdi = exists(args, 'vdi-ref')
- size = exists(args, 'image-size')
- #Use the uuid as a filename
- vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
- copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
- filename = with_vdi_in_dom0(session, vdi, False,
- lambda dev:
- _copy_kernel_vdi('/dev/%s' % dev, copy_args))
- return filename
def _copy_kernel_vdi(dest, copy_args):
@@ -89,93 +62,321 @@ def _copy_kernel_vdi(dest, copy_args):
return filename
-def put_vdis(session, args):
+def _download_tarball(sr_path, staging_path, image_id, glance_host,
+ glance_port):
+ """Download the tarball image from Glance and extract it into the staging
+ area.
+ """
+ conn = httplib.HTTPConnection(glance_host, glance_port)
+ conn.request('GET', '/images/%s' % image_id)
+ resp = conn.getresponse()
+ if resp.status == httplib.NOT_FOUND:
+ raise Exception("Image '%s' not found in Glance" % image_id)
+ elif resp.status != httplib.OK:
+ raise Exception("Unexpected response from Glance %i" % resp.status)
+
+ tar_cmd = "tar -zx --directory=%(staging_path)s" % locals()
+ tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True)
+
+ chunk = resp.read(CHUNK_SIZE)
+ while chunk:
+ tar_proc.stdin.write(chunk)
+ chunk = resp.read(CHUNK_SIZE)
+
+ _finish_subprocess(tar_proc, tar_cmd)
+ conn.close()
+
+
+def _fixup_vhds(sr_path, staging_path, uuid_stack):
+ """Fixup the downloaded VHDs before we move them into the SR.
+
+ We cannot extract VHDs directly into the SR since they don't yet have
+ UUIDs, aren't properly associated with each other, and would be subject to
+ a race-condition of one-file being present and the other not being
+ downloaded yet.
+
+ To avoid these problems, we use a staging area to fix up the VHDs before
+ moving them into the SR. The steps involved are:
+
+ 1. Extracting tarball into staging area
+
+ 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd')
+
+ 3. Linking the two VHDs together
+
+ 4. Pseudo-atomically moving the images into the SR. (It's not really
+ atomic because it takes place as two os.rename operations; however,
+ the chances of an SR.scan occurring between the two rename()
+ invocations is so small that we can safely ignore it)
+ """
+ def rename_with_uuid(orig_path):
+ """Rename VHD using UUID so that it will be recognized by SR on a
+ subsequent scan.
+
+ Since Python2.4 doesn't have the `uuid` module, we pass a stack of
+ pre-computed UUIDs from the compute worker.
+ """
+ orig_dirname = os.path.dirname(orig_path)
+ uuid = uuid_stack.pop()
+ new_path = os.path.join(orig_dirname, "%s.vhd" % uuid)
+ os.rename(orig_path, new_path)
+ return new_path, uuid
+
+ def link_vhds(child_path, parent_path):
+ """Use vhd-util to associate the snapshot VHD with its base_copy.
+
+ This needs to be done before we move both VHDs into the SR to prevent
+ the base_copy from being DOA (deleted-on-arrival).
+ """
+ modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s"
+ % locals())
+ modify_proc = _make_subprocess(modify_cmd, stderr=True)
+ _finish_subprocess(modify_proc, modify_cmd)
+
+ def move_into_sr(orig_path):
+ """Move a file into the SR"""
+ filename = os.path.basename(orig_path)
+ new_path = os.path.join(sr_path, filename)
+ os.rename(orig_path, new_path)
+ return new_path
+
+ def assert_vhd_not_hidden(path):
+ """
+ This is a sanity check on the image; if a snap.vhd isn't
+ present, then the image.vhd better not be marked 'hidden' or it will
+ be deleted when moved into the SR.
+ """
+ query_cmd = "vhd-util query -n %(path)s -f" % locals()
+ query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True)
+ out, err = _finish_subprocess(query_proc, query_cmd)
+
+ for line in out.splitlines():
+ if line.startswith('hidden'):
+ value = line.split(':')[1].strip()
+ if value == "1":
+ raise Exception(
+ "VHD %(path)s is marked as hidden without child" %
+ locals())
+
+ orig_base_copy_path = os.path.join(staging_path, 'image.vhd')
+ if not os.path.exists(orig_base_copy_path):
+ raise Exception("Invalid image: image.vhd not present")
+
+ base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path)
+
+ vdi_uuid = base_copy_uuid
+ orig_snap_path = os.path.join(staging_path, 'snap.vhd')
+ if os.path.exists(orig_snap_path):
+ snap_path, snap_uuid = rename_with_uuid(orig_snap_path)
+ vdi_uuid = snap_uuid
+ # NOTE(sirp): this step is necessary so that an SR scan won't
+ # delete the base_copy out from under us (since it would be
+ # orphaned)
+ link_vhds(snap_path, base_copy_path)
+ move_into_sr(snap_path)
+ else:
+ assert_vhd_not_hidden(base_copy_path)
+
+ move_into_sr(base_copy_path)
+ return vdi_uuid
+
+
+def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
+ """Hard-link VHDs into staging area with appropriate filename
+ ('snap.vhd' or 'image.vhd')
+ """
+ for name, uuid in vdi_uuids.items():
+ source = os.path.join(sr_path, "%s.vhd" % uuid)
+ link_name = os.path.join(staging_path, "%s.vhd" % name)
+ os.link(source, link_name)
+
+
+def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type):
+ """
+ Create a tarball of the image and then stream that into Glance
+ using chunked-transfer-encoded HTTP.
+ """
+ conn = httplib.HTTPConnection(glance_host, glance_port)
+ # NOTE(sirp): httplib under python2.4 won't accept a file-like object
+ # to request
+ conn.putrequest('PUT', '/images/%s' % image_id)
+
+ # NOTE(sirp): There is some confusion around OVF. Here's a summary of
+ # where we currently stand:
+ # 1. OVF as a container format is misnamed. We really should be using
+ # OVA since that is the name for the container format; OVF is the
+ # standard applied to the manifest file contained within.
+ # 2. We're currently uploading a vanilla tarball. In order to be OVF/OVA
+ # compliant, we'll need to embed a minimal OVF manifest as the first
+ # file.
+ headers = {
+ 'content-type': 'application/octet-stream',
+ 'transfer-encoding': 'chunked',
+ 'x-image-meta-is-public': 'True',
+ 'x-image-meta-status': 'queued',
+ 'x-image-meta-disk-format': 'vhd',
+ 'x-image-meta-container-format': 'ovf',
+ 'x-image-meta-property-os-type': os_type}
+
+ for header, value in headers.iteritems():
+ conn.putheader(header, value)
+ conn.endheaders()
+
+ tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals()
+ tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True)
+
+ chunk = tar_proc.stdout.read(CHUNK_SIZE)
+ while chunk:
+ conn.send("%x\r\n%s\r\n" % (len(chunk), chunk))
+ chunk = tar_proc.stdout.read(CHUNK_SIZE)
+ conn.send("0\r\n\r\n")
+
+ _finish_subprocess(tar_proc, tar_cmd)
+
+ resp = conn.getresponse()
+ if resp.status != httplib.OK:
+ raise Exception("Unexpected response from Glance %i" % resp.status)
+ conn.close()
+
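The upload path implements HTTP/1.1 chunked transfer-encoding by hand: each chunk is framed as its length in hex, CRLF, the payload, CRLF, and the body is terminated by a zero-length chunk. A small stand-alone sketch of that framing (names are illustrative and independent of Glance):

    def iter_chunked(readable, chunk_size=8192):
        """Yield HTTP/1.1 chunked transfer-encoding frames for a stream."""
        while True:
            chunk = readable.read(chunk_size)
            if not chunk:
                break
            yield "%x\r\n%s\r\n" % (len(chunk), chunk)
        yield "0\r\n\r\n"  # terminating zero-length chunk

Sending these frames over the connection is what the send() loop in _upload_tarball does with the tar process's stdout.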
+
+def _make_staging_area(sr_path):
+ """
+ The staging area is a place where we can temporarily store and
+ manipulate VHDs. The use of the staging area is different for upload and
+ download:
+
+ Download
+ ========
+
+ When we download the tarball, the VHDs contained within will have names
+ like "snap.vhd" and "image.vhd". We need to assign UUIDs to them before
+ moving them into the SR. Also, since 'image.vhd' may be a base_copy, we
+ need to link 'snap.vhd' to it (using vhd-util modify) before moving both
+ into the SR; otherwise the SR scan would treat 'image.vhd' as an orphan
+ and delete it. The staging area gives us a place to perform these
+ operations before the VHDs are moved into the SR, scanned, and then
+ registered with XenServer.
+
+ Upload
+ ======
+
+ On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd'
+ in the case of the snapshot VHD, and 'image.vhd' in the case of the
+ base_copy. The staging area provides a directory in which we can create
+ hard-links to rename the VHDs without affecting what's in the SR.
+
+
+ NOTE
+ ====
+
+ The staging area is created as a subdirectory within the SR in order to
+ guarantee that it resides within the same filesystem and therefore permit
+ hard-linking and cheap file moves.
+ """
+ staging_path = tempfile.mkdtemp(dir=sr_path)
+ return staging_path
+
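Because the staging directory is created inside the SR itself, os.link() and os.rename() never cross a filesystem boundary, so the "moves" are metadata operations rather than data copies. A toy, self-contained illustration of that invariant (using a temporary directory as a stand-in for the SR):

    import os
    import shutil
    import tempfile

    sr_path = tempfile.mkdtemp()                # stand-in for the SR mount point
    open(os.path.join(sr_path, 'abc.vhd'), 'w').close()  # pretend VHD in the SR
    staging = tempfile.mkdtemp(dir=sr_path)     # staging area lives inside the SR
    os.link(os.path.join(sr_path, 'abc.vhd'),   # hard link: no data is copied
            os.path.join(staging, 'image.vhd'))
    shutil.rmtree(staging)                      # SR copy survives; link count > 0
    shutil.rmtree(sr_path)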
+
+def _cleanup_staging_area(staging_path):
+ """Remove staging area directory
+
+ On upload, the staging area contains hard-links to the VHDs in the SR;
+ it's safe to remove the staging-area because the SR will keep the link
+ count > 0 (so the VHDs in the SR will not be deleted).
+ """
+ shutil.rmtree(staging_path)
+
+
+def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False):
+ """Make a subprocess according to the given command-line string
+ """
+ kwargs = {}
+ kwargs['stdout'] = stdout and subprocess.PIPE or None
+ kwargs['stderr'] = stderr and subprocess.PIPE or None
+ kwargs['stdin'] = stdin and subprocess.PIPE or None
+ args = shlex.split(cmdline)
+ proc = subprocess.Popen(args, **kwargs)
+ return proc
+
+
+def _finish_subprocess(proc, cmdline):
+ """Ensure that the process returned a zero exit code indicating success
+ """
+ out, err = proc.communicate()
+ ret = proc.returncode
+ if ret != 0:
+ raise Exception("'%(cmdline)s' returned non-zero exit code: "
+ "retcode=%(ret)i, stderr='%(err)s'" % locals())
+ return out, err
+
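Together these two helpers behave roughly like a check_call() that also captures output. For instance, the vhd-util query performed above boils down to something like this (sketch only, with a made-up path):

    query_cmd = "vhd-util query -n /path/to/disk.vhd -f"
    query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True)
    out, err = _finish_subprocess(query_proc, query_cmd)  # raises on non-zero exit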
+
+def download_vhd(session, args):
+ """Download an image from Glance, unbundle it, and then deposit the VHDs
+ into the storage repository
+ """
params = pickle.loads(exists(args, 'params'))
- vdi_uuids = params["vdi_uuids"]
image_id = params["image_id"]
glance_host = params["glance_host"]
glance_port = params["glance_port"]
+ uuid_stack = params["uuid_stack"]
+ sr_path = params["sr_path"]
- sr_path = get_sr_path(session)
- #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
- tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id))
- tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
- paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids]
- tar_cmd.extend(paths)
- logging.debug("Bundling image with cmd: %s", tar_cmd)
- subprocess.call(tar_cmd)
- logging.debug("Writing to test file %s", tmp_file)
- put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port)
- # FIXME(sirp): return anything useful here?
- return ""
-
-
-def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
- size = os.path.getsize(tmp_file)
- basename = os.path.basename(tmp_file)
-
- bundle = open(tmp_file, 'r')
+ staging_path = _make_staging_area(sr_path)
try:
- headers = {
- 'x-image-meta-store': 'file',
- 'x-image-meta-is_public': 'True',
- 'x-image-meta-type': 'raw',
- 'x-image-meta-size': size,
- 'content-length': size,
- 'content-type': 'application/octet-stream',
- }
- conn = httplib.HTTPConnection(glance_host, glance_port)
- #NOTE(sirp): httplib under python2.4 won't accept a file-like object
- # to request
- conn.putrequest('PUT', '/images/%s' % image_id)
-
- for header, value in headers.iteritems():
- conn.putheader(header, value)
- conn.endheaders()
-
- chunk = bundle.read(CHUNK_SIZE)
- while chunk:
- conn.send(chunk)
- chunk = bundle.read(CHUNK_SIZE)
-
- res = conn.getresponse()
- #FIXME(sirp): should this be 201 Created?
- if res.status != httplib.OK:
- raise Exception("Unexpected response from Glance %i" % res.status)
+ _download_tarball(sr_path, staging_path, image_id, glance_host,
+ glance_port)
+ vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack)
+ return vdi_uuid
finally:
- bundle.close()
+ _cleanup_staging_area(staging_path)
+
+
+def upload_vhd(session, args):
+ """Bundle the VHDs comprising an image and then stream them into Glance.
+ """
+ params = pickle.loads(exists(args, 'params'))
+ vdi_uuids = params["vdi_uuids"]
+ image_id = params["image_id"]
+ glance_host = params["glance_host"]
+ glance_port = params["glance_port"]
+ sr_path = params["sr_path"]
+ os_type = params["os_type"]
+ staging_path = _make_staging_area(sr_path)
+ try:
+ _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
+ _upload_tarball(staging_path, image_id, glance_host, glance_port,
+ os_type)
+ finally:
+ _cleanup_staging_area(staging_path)
-def get_sr_path(session):
- sr_ref = find_sr(session)
+ return "" # Nothing useful to return on an upload
- if sr_ref is None:
- raise Exception('Cannot find SR to read VDI from')
- sr_rec = session.xenapi.SR.get_record(sr_ref)
- sr_uuid = sr_rec["uuid"]
- sr_path = os.path.join(FILE_SR_PATH, sr_uuid)
- return sr_path
+def copy_kernel_vdi(session, args):
+ vdi = exists(args, 'vdi-ref')
+ size = exists(args, 'image-size')
+ #Use the uuid as a filename
+ vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
+ copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
+ filename = with_vdi_in_dom0(session, vdi, False,
+ lambda dev:
+ _copy_kernel_vdi('/dev/%s' % dev, copy_args))
+ return filename
-#TODO(sirp): both objectstore and glance need this, should this be refactored
-#into common lib
-def find_sr(session):
- host = get_this_host(session)
- srs = session.xenapi.SR.get_all()
- for sr in srs:
- sr_rec = session.xenapi.SR.get_record(sr)
- if not ('i18n-key' in sr_rec['other_config'] and
- sr_rec['other_config']['i18n-key'] == 'local-storage'):
- continue
- for pbd in sr_rec['PBDs']:
- pbd_rec = session.xenapi.PBD.get_record(pbd)
- if pbd_rec['host'] == host:
- return sr
- return None
+def remove_kernel_ramdisk(session, args):
+ """Removes kernel and/or ramdisk from dom0's file system"""
+ kernel_file = exists(args, 'kernel-file')
+ ramdisk_file = exists(args, 'ramdisk-file')
+ if kernel_file:
+ os.remove(kernel_file)
+ if ramdisk_file:
+ os.remove(ramdisk_file)
+ return "ok"
if __name__ == '__main__':
- XenAPIPlugin.dispatch({'put_vdis': put_vdis,
+ XenAPIPlugin.dispatch({'upload_vhd': upload_vhd,
+ 'download_vhd': download_vhd,
'copy_kernel_vdi': copy_kernel_vdi,
'remove_kernel_ramdisk': remove_kernel_ramdisk})
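On the compute side these entry points are reached through the XenAPI plugin mechanism: the driver pickles a params dict and asks the host to run the named function from this file. A hedged sketch of such a call; the exact helper nova's xenapi driver uses may differ, and `session` and `host_ref` are assumed to come from an authenticated XenAPI connection:

    import pickle
    import uuid

    params = {'image_id': 'some-image-id',            # illustrative values only
              'glance_host': '127.0.0.1',
              'glance_port': 9292,
              'uuid_stack': [str(uuid.uuid4()) for _i in range(2)],
              'sr_path': '/var/run/sr-mount/some-sr-uuid'}
    args = {'params': pickle.dumps(params)}
    # XenAPI exposes host.call_plugin(host_ref, plugin, fn, args)
    vdi_uuid = session.xenapi.host.call_plugin(host_ref, 'glance',
                                               'download_vhd', args)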
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
new file mode 100644
index 000000000..75c653408
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+XenAPI Plugin for transferring data between host nodes
+"""
+
+import os
+import os.path
+import pickle
+import shlex
+import shutil
+import subprocess
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging('migration')
+
+
+def move_vhds_into_sr(session, args):
+ """Moves the VHDs from their copied location to the SR"""
+ params = pickle.loads(exists(args, 'params'))
+ instance_id = params['instance_id']
+
+ old_base_copy_uuid = params['old_base_copy_uuid']
+ old_cow_uuid = params['old_cow_uuid']
+
+ new_base_copy_uuid = params['new_base_copy_uuid']
+ new_cow_uuid = params['new_cow_uuid']
+
+ sr_path = params['sr_path']
+ sr_temp_path = "%s/images/" % sr_path
+
+ # Discover the copied VHDs locally, and then set up paths to copy
+ # them to under the SR
+ source_image_path = "%s/instance%d" % ('/images/', instance_id)
+ source_base_copy_path = "%s/%s.vhd" % (source_image_path,
+ old_base_copy_uuid)
+ source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
+
+ temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id)
+ new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid)
+ new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid)
+
+ logging.debug('Creating temporary SR path %s' % temp_vhd_path)
+ os.makedirs(temp_vhd_path)
+
+ logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path))
+ shutil.move(source_base_copy_path, new_base_copy_path)
+
+ logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path))
+ shutil.move(source_cow_path, new_cow_path)
+
+ logging.debug('Cleaning up %s' % source_image_path)
+ os.rmdir(source_image_path)
+
+ # Link the COW to the base copy
+ logging.debug('Attaching COW to the base copy %s -> %s' %
+ (new_cow_path, new_base_copy_path))
+ subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' %
+ (new_cow_path, new_base_copy_path)))
+ logging.debug('Moving VHDs into SR %s' % sr_path)
+ shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path)
+ shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path)
+
+ logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path)
+ os.rmdir(temp_vhd_path)
+ return ""
+
+
+def transfer_vhd(session, args):
+ """Rsyncs a VHD to an adjacent host"""
+ params = pickle.loads(exists(args, 'params'))
+ instance_id = params['instance_id']
+ host = params['host']
+ vdi_uuid = params['vdi_uuid']
+ sr_path = params['sr_path']
+ vhd_path = "%s.vhd" % vdi_uuid
+
+ source_path = "%s/%s" % (sr_path, vhd_path)
+ dest_path = '%s:%sinstance%d/' % (host, '/images/', instance_id)
+
+ logging.debug("Preparing to transmit %s to %s" % (source_path,
+ dest_path))
+
+ ssh_cmd = '\"ssh -o StrictHostKeyChecking=no\"'
+
+ rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s'
+ % (ssh_cmd, source_path, dest_path))
+
+ logging.debug('rsync %s' % (' '.join(rsync_args, )))
+
+ rsync_proc = subprocess.Popen(rsync_args, stdout=subprocess.PIPE)
+ logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0])
+ logging.debug('Rsync return: %d' % rsync_proc.returncode)
+ if rsync_proc.returncode != 0:
+ raise Exception("Unexpected VHD transfer failure")
+ return ""
+
+
+if __name__ == '__main__':
+ XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd,
+ 'move_vhds_into_sr': move_vhds_into_sr, })
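As with the glance plugin, both functions receive a single pickled 'params' argument. A sketch of the dict the compute worker would need to supply for transfer_vhd (all values are placeholders):

    import pickle

    params = {'instance_id': 42,
              'host': '192.168.1.10',     # destination compute host
              'vdi_uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
              'sr_path': '/var/run/sr-mount/some-sr-uuid'}
    args = {'params': pickle.dumps(params)}
    # transfer_vhd then rsyncs <sr_path>/<vdi_uuid>.vhd to
    # <host>:/images/instance42/ over ssh.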
diff --git a/po/nova.pot b/po/nova.pot
index ce88d731b..58140302d 100644
--- a/po/nova.pot
+++ b/po/nova.pot
@@ -300,7 +300,7 @@ msgstr ""
msgid "instance %s: starting..."
msgstr ""
-#. pylint: disable-msg=W0702
+#. pylint: disable=W0702
#: ../nova/compute/manager.py:219
#, python-format
msgid "instance %s: Failed to spawn"
@@ -440,7 +440,7 @@ msgid ""
"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#. pylint: disable-msg=W0702
+#. pylint: disable=W0702
#. NOTE(vish): The inline callback eats the exception info so we
#. log the traceback here and reraise the same
#. exception below.
@@ -591,7 +591,7 @@ msgstr ""
msgid "Starting Bridge interface for %s"
msgstr ""
-#. pylint: disable-msg=W0703
+#. pylint: disable=W0703
#: ../nova/network/linux_net.py:314
#, python-format
msgid "Hupping dnsmasq threw %s"
@@ -602,7 +602,7 @@ msgstr ""
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#. pylint: disable-msg=W0703
+#. pylint: disable=W0703
#: ../nova/network/linux_net.py:358
#, python-format
msgid "killing radvd threw %s"
@@ -613,7 +613,7 @@ msgstr ""
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#. pylint: disable-msg=W0703
+#. pylint: disable=W0703
#: ../nova/network/linux_net.py:449
#, python-format
msgid "Killing dnsmasq threw %s"
diff --git a/pylintrc b/pylintrc
index f07b14980..135eea4d5 100644
--- a/pylintrc
+++ b/pylintrc
@@ -1,8 +1,12 @@
+# The format of this file isn't really documented; just use --generate-rcfile
+
[Messages Control]
+# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future
+# C0111: Don't require docstrings on every method
# W0511: TODOs in code comments are fine.
# W0142: *args and **kwargs are fine.
# W0622: Redefining id is fine.
-disable-msg=W0511,W0142,W0622
+disable=C0111,W0511,W0142,W0622
[Basic]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
@@ -25,3 +29,10 @@ no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
max-public-methods=100
min-public-methods=0
max-args=6
+
+[Variables]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+# _ is used by our localization
+additional-builtins=_
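The new additional-builtins=_ entry is needed because the codebase installs gettext's translation function as a builtin named _, which pylint would otherwise report as undefined. The mechanism in miniature:

    import gettext

    gettext.install('nova', unicode=1)  # injects _() into __builtin__
    print _('Hello')                    # valid in any module, no import required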
diff --git a/run_tests.py b/run_tests.py
index 3c8d410e1..d5d8acd16 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -60,6 +60,8 @@ import os
import unittest
import sys
+gettext.install('nova', unicode=1)
+
from nose import config
from nose import core
from nose import result
diff --git a/run_tests.sh b/run_tests.sh
index 7ac3ff33f..8f4d37cd4 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -84,7 +84,7 @@ fi
if [ -z "$noseargs" ];
then
srcfiles=`find bin -type f ! -name "nova.conf*"`
- srcfiles+=" nova setup.py"
+ srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1
else
run_tests
diff --git a/setup.py b/setup.py
index 3b48990ac..20f4c1947 100644
--- a/setup.py
+++ b/setup.py
@@ -112,4 +112,5 @@ DistUtilsExtra.auto.setup(name='nova',
'bin/nova-spoolsentry',
'bin/stack',
'bin/nova-volume',
+ 'bin/nova-vncproxy',
'tools/nova-debug'])
diff --git a/smoketests/base.py b/smoketests/base.py
index 204b4a1eb..31d82b20b 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -31,17 +31,23 @@ from smoketests import flags
SUITE_NAMES = '[image, instance, volume]'
FLAGS = flags.FLAGS
flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
-boto_v6 = None
+flags.DEFINE_integer('ssh_tries', 3, 'Number of times to try ssh')
class SmokeTestCase(unittest.TestCase):
def connect_ssh(self, ip, key_name):
- # TODO(devcamcar): set a more reasonable connection timeout time
key = paramiko.RSAKey.from_private_key_file('/tmp/%s.pem' % key_name)
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.WarningPolicy())
- client.connect(ip, username='root', pkey=key)
- return client
+ tries = 0
+ while True:
+ try:
+ client = paramiko.SSHClient()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+ client.connect(ip, username='root', pkey=key, timeout=5)
+ return client
+ except (paramiko.AuthenticationException, paramiko.SSHException):
+ tries += 1
+ if tries == FLAGS.ssh_tries:
+ raise
def can_ping(self, ip, command="ping"):
"""Attempt to ping the specified IP, and give up after 1 second."""
@@ -147,8 +153,8 @@ class SmokeTestCase(unittest.TestCase):
except:
pass
- def bundle_image(self, image, kernel=False):
- cmd = 'euca-bundle-image -i %s' % image
+ def bundle_image(self, image, tempdir='/tmp', kernel=False):
+ cmd = 'euca-bundle-image -i %s -d %s' % (image, tempdir)
if kernel:
cmd += ' --kernel true'
status, output = commands.getstatusoutput(cmd)
@@ -157,9 +163,9 @@ class SmokeTestCase(unittest.TestCase):
raise Exception(output)
return True
- def upload_image(self, bucket_name, image):
+ def upload_image(self, bucket_name, image, tempdir='/tmp'):
cmd = 'euca-upload-bundle -b '
- cmd += '%s -m /tmp/%s.manifest.xml' % (bucket_name, image)
+ cmd += '%s -m %s/%s.manifest.xml' % (bucket_name, tempdir, image)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print '%s -> \n %s' % (cmd, output)
@@ -176,6 +182,9 @@ class SmokeTestCase(unittest.TestCase):
TEST_DATA = {}
+if FLAGS.use_ipv6:
+ global boto_v6
+ boto_v6 = __import__('boto_v6')
class UserSmokeTestCase(SmokeTestCase):
@@ -183,29 +192,3 @@ class UserSmokeTestCase(SmokeTestCase):
global TEST_DATA
self.conn = self.connection_for_env()
self.data = TEST_DATA
-
-
-def run_tests(suites):
- argv = FLAGS(sys.argv)
- if FLAGS.use_ipv6:
- global boto_v6
- boto_v6 = __import__('boto_v6')
-
- if not os.getenv('EC2_ACCESS_KEY'):
- print >> sys.stderr, 'Missing EC2 environment variables. Please ' \
- 'source the appropriate novarc file before ' \
- 'running this test.'
- return 1
-
- if FLAGS.suite:
- try:
- suite = suites[FLAGS.suite]
- except KeyError:
- print >> sys.stderr, 'Available test suites:', \
- ', '.join(suites.keys())
- return 1
-
- unittest.TextTestRunner(verbosity=2).run(suite)
- else:
- for suite in suites.itervalues():
- unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/smoketests/proxy.sh b/smoketests/proxy.sh
index 9b3f3108a..b9057fe9d 100755
--- a/smoketests/proxy.sh
+++ b/smoketests/proxy.sh
@@ -11,12 +11,19 @@
mkfifo backpipe1
mkfifo backpipe2
+if nc -h 2>&1 | grep -i openbsd
+then
+ NC_LISTEN="nc -l"
+else
+ NC_LISTEN="nc -l -p"
+fi
+
# NOTE(vish): proxy metadata on port 80
while true; do
- nc -l -p 80 0<backpipe1 | nc 169.254.169.254 80 1>backpipe1
+ $NC_LISTEN 80 0<backpipe1 | nc 169.254.169.254 80 1>backpipe1
done &
# NOTE(vish): proxy google on port 8080
while true; do
- nc -l -p 8080 0<backpipe2 | nc 74.125.19.99 80 1>backpipe2
+ $NC_LISTEN 8080 0<backpipe2 | nc 74.125.19.99 80 1>backpipe2
done &
diff --git a/smoketests/public_network_smoketests.py b/smoketests/public_network_smoketests.py
index 5a4c67642..0ba477b7c 100644
--- a/smoketests/public_network_smoketests.py
+++ b/smoketests/public_network_smoketests.py
@@ -19,10 +19,8 @@
import commands
import os
import random
-import socket
import sys
import time
-import unittest
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -181,7 +179,3 @@ class InstanceTestsFromPublic(base.UserSmokeTestCase):
self.conn.delete_security_group(security_group_name)
if 'instance_id' in self.data:
self.conn.terminate_instances([self.data['instance_id']])
-
-if __name__ == "__main__":
- suites = {'instance': unittest.makeSuite(InstanceTestsFromPublic)}
- sys.exit(base.run_tests(suites))
diff --git a/smoketests/run_tests.py b/smoketests/run_tests.py
new file mode 100644
index 000000000..62bdfbec6
--- /dev/null
+++ b/smoketests/run_tests.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""Unittest runner for Nova.
+
+To run all tests
+ python run_tests.py
+
+To run a single test:
+ python run_tests.py test_compute:ComputeTestCase.test_run_terminate
+
+To run a single test module:
+ python run_tests.py test_compute
+
+ or
+
+ python run_tests.py api.test_wsgi
+
+"""
+
+import gettext
+import os
+import unittest
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+gettext.install('nova', unicode=1)
+
+from nose import config
+from nose import core
+from nose import result
+
+from smoketests import flags
+FLAGS = flags.FLAGS
+
+
+class _AnsiColorizer(object):
+ """
+ A colorizer is an object that loosely wraps around a stream, allowing
+ callers to write text to the stream in a particular color.
+
+ Colorizer classes must implement C{supported()} and C{write(text, color)}.
+ """
+ _colors = dict(black=30, red=31, green=32, yellow=33,
+ blue=34, magenta=35, cyan=36, white=37)
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ """
+ A class method that returns True if the current platform supports
+ coloring terminal output using this method. Returns False otherwise.
+ """
+ if not stream.isatty():
+ return False # auto color only on TTYs
+ try:
+ import curses
+ except ImportError:
+ return False
+ else:
+ try:
+ try:
+ return curses.tigetnum("colors") > 2
+ except curses.error:
+ curses.setupterm()
+ return curses.tigetnum("colors") > 2
+ except:
+ raise
+ # guess false in case of error
+ return False
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ """
+ Write the given text to the stream in the given color.
+
+ @param text: Text to be written to the stream.
+
+ @param color: A string label for a color. e.g. 'red', 'white'.
+ """
+ color = self._colors[color]
+ self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
+
+
+class _Win32Colorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ from win32console import GetStdHandle, STD_OUT_HANDLE, \
+ FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \
+ FOREGROUND_INTENSITY
+ red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
+ FOREGROUND_BLUE, FOREGROUND_INTENSITY)
+ self.stream = stream
+ self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
+ self._colors = {
+ 'normal': red | green | blue,
+ 'red': red | bold,
+ 'green': green | bold,
+ 'blue': blue | bold,
+ 'yellow': red | green | bold,
+ 'magenta': red | blue | bold,
+ 'cyan': green | blue | bold,
+ 'white': red | green | blue | bold
+ }
+
+ def supported(cls, stream=sys.stdout):
+ try:
+ import win32console
+ screenBuffer = win32console.GetStdHandle(
+ win32console.STD_OUT_HANDLE)
+ except ImportError:
+ return False
+ import pywintypes
+ try:
+ screenBuffer.SetConsoleTextAttribute(
+ win32console.FOREGROUND_RED |
+ win32console.FOREGROUND_GREEN |
+ win32console.FOREGROUND_BLUE)
+ except pywintypes.error:
+ return False
+ else:
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ color = self._colors[color]
+ self.screenBuffer.SetConsoleTextAttribute(color)
+ self.stream.write(text)
+ self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
+
+
+class _NullColorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ self.stream.write(text)
+
+
+class NovaTestResult(result.TextTestResult):
+ def __init__(self, *args, **kw):
+ result.TextTestResult.__init__(self, *args, **kw)
+ self._last_case = None
+ self.colorizer = None
+ # NOTE(vish): reset stdout for the terminal check
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
+ if colorizer.supported():
+ self.colorizer = colorizer(self.stream)
+ break
+ sys.stdout = stdout
+
+ def getDescription(self, test):
+ return str(test)
+
+ # NOTE(vish): copied from unittest with edit to add color
+ def addSuccess(self, test):
+ unittest.TestResult.addSuccess(self, test)
+ if self.showAll:
+ self.colorizer.write("OK", 'green')
+ self.stream.writeln()
+ elif self.dots:
+ self.stream.write('.')
+ self.stream.flush()
+
+ # NOTE(vish): copied from unittest with edit to add color
+ def addFailure(self, test, err):
+ unittest.TestResult.addFailure(self, test, err)
+ if self.showAll:
+ self.colorizer.write("FAIL", 'red')
+ self.stream.writeln()
+ elif self.dots:
+ self.stream.write('F')
+ self.stream.flush()
+
+ # NOTE(vish): copied from nose with edit to add color
+ def addError(self, test, err):
+ """Overrides normal addError to add support for
+ errorClasses. If the exception is a registered class, the
+ error will be added to the list for that class, not errors.
+ """
+ stream = getattr(self, 'stream', None)
+ ec, ev, tb = err
+ try:
+ exc_info = self._exc_info_to_string(err, test)
+ except TypeError:
+ # 2.3 compat
+ exc_info = self._exc_info_to_string(err)
+ for cls, (storage, label, isfail) in self.errorClasses.items():
+ if result.isclass(ec) and issubclass(ec, cls):
+ if isfail:
+ test.passed = False
+ storage.append((test, exc_info))
+ # Might get patched into a streamless result
+ if stream is not None:
+ if self.showAll:
+ message = [label]
+ detail = result._exception_detail(err[1])
+ if detail:
+ message.append(detail)
+ stream.writeln(": ".join(message))
+ elif self.dots:
+ stream.write(label[:1])
+ return
+ self.errors.append((test, exc_info))
+ test.passed = False
+ if stream is not None:
+ if self.showAll:
+ self.colorizer.write("ERROR", 'red')
+ self.stream.writeln()
+ elif self.dots:
+ stream.write('E')
+
+ def startTest(self, test):
+ unittest.TestResult.startTest(self, test)
+ current_case = test.test.__class__.__name__
+
+ if self.showAll:
+ if current_case != self._last_case:
+ self.stream.writeln(current_case)
+ self._last_case = current_case
+
+ self.stream.write(
+ ' %s' % str(test.test._testMethodName).ljust(60))
+ self.stream.flush()
+
+
+class NovaTestRunner(core.TextTestRunner):
+ def _makeResult(self):
+ return NovaTestResult(self.stream,
+ self.descriptions,
+ self.verbosity,
+ self.config)
+
+
+if __name__ == '__main__':
+ if not os.getenv('EC2_ACCESS_KEY'):
+ print _('Missing EC2 environment variables. Please ' \
+ 'source the appropriate novarc file before ' \
+ 'running this test.')
+ sys.exit(1)
+
+ argv = FLAGS(sys.argv)
+ testdir = os.path.abspath("./")
+ c = config.Config(stream=sys.stdout,
+ env=os.environ,
+ verbosity=3,
+ workingDir=testdir,
+ plugins=core.DefaultPluginManager())
+
+ runner = NovaTestRunner(stream=c.stream,
+ verbosity=c.verbosity,
+ config=c)
+ sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
diff --git a/smoketests/admin_smoketests.py b/smoketests/test_admin.py
index 86a7f600d..1b7a8d673 100644
--- a/smoketests/admin_smoketests.py
+++ b/smoketests/test_admin.py
@@ -30,15 +30,11 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
-from nova import adminclient
from smoketests import flags
from smoketests import base
-SUITE_NAMES = '[user]'
-
FLAGS = flags.FLAGS
-flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
# TODO(devamcar): Use random tempfile
ZIP_FILENAME = '/tmp/nova-me-x509.zip'
@@ -50,6 +46,7 @@ TEST_PROJECTNAME = '%sproject' % TEST_PREFIX
class AdminSmokeTestCase(base.SmokeTestCase):
def setUp(self):
+ import nova_adminclient as adminclient
self.admin = adminclient.NovaAdminClient(
access_key=os.getenv('EC2_ACCESS_KEY'),
secret_key=os.getenv('EC2_SECRET_KEY'),
@@ -92,7 +89,3 @@ class UserTests(AdminSmokeTestCase):
os.remove(ZIP_FILENAME)
except:
pass
-
-if __name__ == "__main__":
- suites = {'user': unittest.makeSuite(UserTests)}
- sys.exit(base.run_tests(suites))
diff --git a/smoketests/netadmin_smoketests.py b/smoketests/test_netadmin.py
index 38beb8fdc..60086f065 100644
--- a/smoketests/netadmin_smoketests.py
+++ b/smoketests/test_netadmin.py
@@ -21,7 +21,6 @@ import os
import random
import sys
import time
-import unittest
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -74,8 +73,10 @@ class AddressTests(base.UserSmokeTestCase):
groups = self.conn.get_all_security_groups(['default'])
for rule in groups[0].rules:
if (rule.ip_protocol == 'tcp' and
- rule.from_port <= 22 and rule.to_port >= 22):
+ int(rule.from_port) <= 22 and
+ int(rule.to_port) >= 22):
ssh_authorized = True
+ break
if not ssh_authorized:
self.conn.authorize_security_group('default',
ip_protocol='tcp',
@@ -137,11 +138,6 @@ class SecurityGroupTests(base.UserSmokeTestCase):
if not self.wait_for_running(self.data['instance']):
self.fail('instance failed to start')
self.data['instance'].update()
- if not self.wait_for_ping(self.data['instance'].private_dns_name):
- self.fail('could not ping instance')
- if not self.wait_for_ssh(self.data['instance'].private_dns_name,
- TEST_KEY):
- self.fail('could not ssh to instance')
def test_003_can_authorize_security_group_ingress(self):
self.assertTrue(self.conn.authorize_security_group(TEST_GROUP,
@@ -185,10 +181,3 @@ class SecurityGroupTests(base.UserSmokeTestCase):
self.assertFalse(TEST_GROUP in [group.name for group in groups])
self.conn.terminate_instances([self.data['instance'].id])
self.assertTrue(self.conn.release_address(self.data['public_ip']))
-
-
-if __name__ == "__main__":
- suites = {'address': unittest.makeSuite(AddressTests),
- 'security_group': unittest.makeSuite(SecurityGroupTests)
- }
- sys.exit(base.run_tests(suites))
diff --git a/smoketests/sysadmin_smoketests.py b/smoketests/test_sysadmin.py
index e3b84d3d3..268d9865b 100644
--- a/smoketests/sysadmin_smoketests.py
+++ b/smoketests/test_sysadmin.py
@@ -16,12 +16,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import commands
import os
import random
import sys
import time
-import unittest
+import tempfile
+import shutil
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -34,8 +34,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from smoketests import flags
from smoketests import base
-
-
FLAGS = flags.FLAGS
flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
'Local kernel file to use for bundling tests')
@@ -46,12 +44,22 @@ TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
TEST_GROUP = '%s_group' % TEST_PREFIX
+
+
class ImageTests(base.UserSmokeTestCase):
def test_001_can_bundle_image(self):
- self.assertTrue(self.bundle_image(FLAGS.bundle_image))
+ self.data['tempdir'] = tempfile.mkdtemp()
+ self.assertTrue(self.bundle_image(FLAGS.bundle_image,
+ self.data['tempdir']))
def test_002_can_upload_image(self):
- self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_image))
+ try:
+ self.assertTrue(self.upload_image(TEST_BUCKET,
+ FLAGS.bundle_image,
+ self.data['tempdir']))
+ finally:
+ if os.path.exists(self.data['tempdir']):
+ shutil.rmtree(self.data['tempdir'])
def test_003_can_register_image(self):
image_id = self.conn.register_image('%s/%s.manifest.xml' %
@@ -148,7 +156,8 @@ class InstanceTests(base.UserSmokeTestCase):
self.fail('could not ping instance')
if FLAGS.use_ipv6:
- if not self.wait_for_ping(self.data['instance'].ip_v6, "ping6"):
+ if not self.wait_for_ping(self.data['instance'].dns_name_v6,
+ "ping6"):
self.fail('could not ping instance v6')
def test_005_can_ssh_to_private_ip(self):
@@ -157,7 +166,7 @@ class InstanceTests(base.UserSmokeTestCase):
self.fail('could not ssh to instance')
if FLAGS.use_ipv6:
- if not self.wait_for_ssh(self.data['instance'].ip_v6,
+ if not self.wait_for_ssh(self.data['instance'].dns_name_v6,
TEST_KEY):
self.fail('could not ssh to instance v6')
@@ -191,7 +200,7 @@ class VolumeTests(base.UserSmokeTestCase):
self.assertEqual(volume.size, 1)
self.data['volume'] = volume
# Give network time to find volume.
- time.sleep(10)
+ time.sleep(5)
def test_002_can_attach_volume(self):
volume = self.data['volume']
@@ -204,6 +213,8 @@ class VolumeTests(base.UserSmokeTestCase):
else:
self.fail('cannot attach volume with state %s' % volume.status)
+ # Give volume some time to be ready.
+ time.sleep(5)
volume.attach(self.data['instance'].id, self.device)
# wait
@@ -218,7 +229,7 @@ class VolumeTests(base.UserSmokeTestCase):
self.assertTrue(volume.status.startswith('in-use'))
# Give instance time to recognize volume.
- time.sleep(10)
+ time.sleep(5)
def test_003_can_mount_volume(self):
ip = self.data['instance'].private_dns_name
@@ -255,12 +266,14 @@ class VolumeTests(base.UserSmokeTestCase):
ip = self.data['instance'].private_dns_name
conn = self.connect_ssh(ip, TEST_KEY)
stdin, stdout, stderr = conn.exec_command(
- "df -h | grep %s | awk {'print $2'}" % self.device)
- out = stdout.read()
+ "cat /sys/class/block/%s/size" % self.device.rpartition('/')[2])
+ out = stdout.read().strip()
conn.close()
- if not out.strip() == '1007.9M':
- self.fail('Volume is not the right size: %s %s' %
- (out, stderr.read()))
+ # NOTE(vish): 1G bytes / 512 bytes per block
+ expected_size = 1024 * 1024 * 1024 / 512
+ self.assertEquals('%s' % (expected_size,), out,
+ 'Volume is not the right size: %s %s. Expected: %s' %
+ (out, stderr.read(), expected_size))
def test_006_me_can_umount_volume(self):
ip = self.data['instance'].private_dns_name
@@ -283,11 +296,3 @@ class VolumeTests(base.UserSmokeTestCase):
def test_999_tearDown(self):
self.conn.terminate_instances([self.data['instance'].id])
self.conn.delete_key_pair(TEST_KEY)
-
-
-if __name__ == "__main__":
- suites = {'image': unittest.makeSuite(ImageTests),
- 'instance': unittest.makeSuite(InstanceTests),
- 'volume': unittest.makeSuite(VolumeTests)
- }
- sys.exit(base.run_tests(suites))
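The rewritten volume-size check above reads the block count from sysfs instead of parsing df output; /sys/class/block/<dev>/size reports 512-byte sectors, so the expected value for a 1 GB volume is plain arithmetic:

    GIGABYTE = 1024 * 1024 * 1024
    SECTOR_SIZE = 512
    expected_size = GIGABYTE / SECTOR_SIZE  # 2097152 sectors
    assert expected_size == 2097152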
diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py
new file mode 100644
index 000000000..bbf3ea908
--- /dev/null
+++ b/tools/esx/guest_tool.py
@@ -0,0 +1,345 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Guest tools for ESX to set up network in the guest.
+On Windows, this requires pyWin32 to be installed.
+"""
+
+import array
+import logging
+import os
+import platform
+import socket
+import struct
+import subprocess
+import sys
+import time
+
+PLATFORM_WIN = 'win32'
+PLATFORM_LINUX = 'linux2'
+ARCH_32_BIT = '32bit'
+ARCH_64_BIT = '64bit'
+NO_MACHINE_ID = 'No machine id'
+
+# Logging
+FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
+if sys.platform == PLATFORM_WIN:
+ LOG_DIR = os.path.join(os.environ.get('ALLUSERSPROFILE'), 'openstack')
+elif sys.platform == PLATFORM_LINUX:
+ LOG_DIR = '/var/log/openstack'
+else:
+ LOG_DIR = 'logs'
+if not os.path.exists(LOG_DIR):
+ os.mkdir(LOG_DIR)
+LOG_FILENAME = os.path.join(LOG_DIR, 'openstack-guest-tools.log')
+logging.basicConfig(filename=LOG_FILENAME, format=FORMAT)
+
+if sys.hexversion < 0x3000000:
+ _byte = ord # 2.x chr to integer
+else:
+ _byte = int # 3.x byte to integer
+
+
+class ProcessExecutionError:
+ """Process Execution Error Class."""
+
+ def __init__(self, exit_code, stdout, stderr, cmd):
+ self.exit_code = exit_code
+ self.stdout = stdout
+ self.stderr = stderr
+ self.cmd = cmd
+
+ def __str__(self):
+ return str(self.exit_code)
+
+
+def _bytes2int(bytes):
+ """Convert bytes to int."""
+ intgr = 0
+ for byt in bytes:
+ intgr = (intgr << 8) + _byte(byt)
+ return intgr
+
+
+def _parse_network_details(machine_id):
+ """
+ Parse the machine.id field to get the MAC, IP, Netmask and Gateway fields.
+ machine.id is of the form MAC;IP;Netmask;Gateway;Broadcast;DNS1,DNS2
+ where ';' is the separator.
+ """
+ network_details = []
+ if machine_id[1].strip() == "1":
+ pass
+ else:
+ network_info_list = machine_id[0].split(';')
+ assert len(network_info_list) % 6 == 0
+ no_grps = len(network_info_list) / 6
+ i = 0
+ while i < no_grps:
+ k = i * 6
+ network_details.append((
+ network_info_list[k].strip().lower(),
+ network_info_list[k + 1].strip(),
+ network_info_list[k + 2].strip(),
+ network_info_list[k + 3].strip(),
+ network_info_list[k + 4].strip(),
+ network_info_list[k + 5].strip().split(',')))
+ i += 1
+ return network_details
+
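Given the MAC;IP;Netmask;Gateway;Broadcast;DNS1,DNS2 layout described above, a small worked example (sample values are made up); note that _parse_network_details takes the (stdout, stderr) pair returned by _execute and treats a second element of "1" as "no details available":

    sample = ('00:50:56:aa:bb:cc;10.0.0.5;255.255.255.0;10.0.0.1;'
              '10.0.0.255;8.8.8.8,8.8.4.4')
    details = _parse_network_details((sample, '0'))
    mac, ip, netmask, gateway, broadcast, dns = details[0]
    assert mac == '00:50:56:aa:bb:cc'
    assert dns == ['8.8.8.8', '8.8.4.4']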
+
+def _get_windows_network_adapters():
+ """Get the list of windows network adapters."""
+ import win32com.client
+ wbem_locator = win32com.client.Dispatch('WbemScripting.SWbemLocator')
+ wbem_service = wbem_locator.ConnectServer('.', 'root\cimv2')
+ wbem_network_adapters = wbem_service.InstancesOf('Win32_NetworkAdapter')
+ network_adapters = []
+ for wbem_network_adapter in wbem_network_adapters:
+ if wbem_network_adapter.NetConnectionStatus == 2 or \
+ wbem_network_adapter.NetConnectionStatus == 7:
+ adapter_name = wbem_network_adapter.NetConnectionID
+ mac_address = wbem_network_adapter.MacAddress.lower()
+ wbem_network_adapter_config = \
+ wbem_network_adapter.associators_(
+ 'Win32_NetworkAdapterSetting',
+ 'Win32_NetworkAdapterConfiguration')[0]
+ ip_address = ''
+ subnet_mask = ''
+ if wbem_network_adapter_config.IPEnabled:
+ ip_address = wbem_network_adapter_config.IPAddress[0]
+ subnet_mask = wbem_network_adapter_config.IPSubnet[0]
+ #wbem_network_adapter_config.DefaultIPGateway[0]
+ network_adapters.append({'name': adapter_name,
+ 'mac-address': mac_address,
+ 'ip-address': ip_address,
+ 'subnet-mask': subnet_mask})
+ return network_adapters
+
+
+def _get_linux_network_adapters():
+ """Get the list of Linux network adapters."""
+ import fcntl
+ max_bytes = 8096
+ arch = platform.architecture()[0]
+ if arch == ARCH_32_BIT:
+ offset1 = 32
+ offset2 = 32
+ elif arch == ARCH_64_BIT:
+ offset1 = 16
+ offset2 = 40
+ else:
+ raise OSError(_("Unknown architecture: %s") % arch)
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ names = array.array('B', '\0' * max_bytes)
+ outbytes = struct.unpack('iL', fcntl.ioctl(
+ sock.fileno(),
+ 0x8912,
+ struct.pack('iL', max_bytes, names.buffer_info()[0])))[0]
+ adapter_names = \
+ [names.tostring()[n_counter:n_counter + offset1].split('\0', 1)[0]
+ for n_counter in xrange(0, outbytes, offset2)]
+ network_adapters = []
+ for adapter_name in adapter_names:
+ ip_address = socket.inet_ntoa(fcntl.ioctl(
+ sock.fileno(),
+ 0x8915,
+ struct.pack('256s', adapter_name))[20:24])
+ subnet_mask = socket.inet_ntoa(fcntl.ioctl(
+ sock.fileno(),
+ 0x891b,
+ struct.pack('256s', adapter_name))[20:24])
+ raw_mac_address = '%012x' % _bytes2int(fcntl.ioctl(
+ sock.fileno(),
+ 0x8927,
+ struct.pack('256s', adapter_name))[18:24])
+ mac_address = ":".join([raw_mac_address[m_counter:m_counter + 2]
+ for m_counter in range(0, len(raw_mac_address), 2)]).lower()
+ network_adapters.append({'name': adapter_name,
+ 'mac-address': mac_address,
+ 'ip-address': ip_address,
+ 'subnet-mask': subnet_mask})
+ return network_adapters
+
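The adapter discovery above drives raw socket ioctls; the hex request codes are the standard Linux values, spelled out here purely for reference:

    # Standard Linux ioctl request codes used by _get_linux_network_adapters():
    SIOCGIFCONF = 0x8912     # enumerate interfaces into a caller-supplied buffer
    SIOCGIFADDR = 0x8915     # IPv4 address of a single interface
    SIOCGIFNETMASK = 0x891b  # netmask of a single interface
    SIOCGIFHWADDR = 0x8927   # hardware (MAC) address of a single interface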
+
+def _get_adapter_name_and_ip_address(network_adapters, mac_address):
+ """Get the adapter name based on the MAC address."""
+ adapter_name = None
+ ip_address = None
+ for network_adapter in network_adapters:
+ if network_adapter['mac-address'] == mac_address.lower():
+ adapter_name = network_adapter['name']
+ ip_address = network_adapter['ip-address']
+ break
+ return adapter_name, ip_address
+
+
+def _get_win_adapter_name_and_ip_address(mac_address):
+ """Get Windows network adapter name."""
+ network_adapters = _get_windows_network_adapters()
+ return _get_adapter_name_and_ip_address(network_adapters, mac_address)
+
+
+def _get_linux_adapter_name_and_ip_address(mac_address):
+ """Get Linux network adapter name."""
+ network_adapters = _get_linux_network_adapters()
+ return _get_adapter_name_and_ip_address(network_adapters, mac_address)
+
+
+def _execute(cmd_list, process_input=None, check_exit_code=True):
+ """Executes the command with the list of arguments specified."""
+ cmd = ' '.join(cmd_list)
+ logging.debug(_("Executing command: '%s'") % cmd)
+ env = os.environ.copy()
+ obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
+ result = None
+ if process_input != None:
+ result = obj.communicate(process_input)
+ else:
+ result = obj.communicate()
+ obj.stdin.close()
+ if obj.returncode:
+ logging.debug(_("Result was %s") % obj.returncode)
+ if check_exit_code and obj.returncode != 0:
+ (stdout, stderr) = result
+ raise ProcessExecutionError(exit_code=obj.returncode,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=cmd)
+ time.sleep(0.1)
+ return result
+
+
+def _windows_set_networking():
+ """Set IP address for the windows VM."""
+ program_files = os.environ.get('PROGRAMFILES')
+ program_files_x86 = os.environ.get('PROGRAMFILES(X86)')
+ vmware_tools_bin = None
+ if os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
+ 'vmtoolsd.exe')):
+ vmware_tools_bin = os.path.join(program_files, 'VMware',
+ 'VMware Tools', 'vmtoolsd.exe')
+ elif os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
+ 'VMwareService.exe')):
+ vmware_tools_bin = os.path.join(program_files, 'VMware',
+ 'VMware Tools', 'VMwareService.exe')
+ elif program_files_x86 and os.path.exists(os.path.join(program_files_x86,
+ 'VMware', 'VMware Tools',
+ 'VMwareService.exe')):
+ vmware_tools_bin = os.path.join(program_files_x86, 'VMware',
+ 'VMware Tools', 'VMwareService.exe')
+ if vmware_tools_bin:
+ cmd = ['"' + vmware_tools_bin + '"', '--cmd', 'machine.id.get']
+ for network_detail in _parse_network_details(_execute(cmd,
+ check_exit_code=False)):
+ mac_address, ip_address, subnet_mask, gateway, broadcast,\
+ dns_servers = network_detail
+ adapter_name, current_ip_address = \
+ _get_win_adapter_name_and_ip_address(mac_address)
+ if adapter_name and not ip_address == current_ip_address:
+ cmd = ['netsh', 'interface', 'ip', 'set', 'address',
+ 'name="%s"' % adapter_name, 'source=static', ip_address,
+ subnet_mask, gateway, '1']
+ _execute(cmd)
+ # Windows doesn't let you manually set the broadcast address
+ for dns_server in dns_servers:
+ if dns_server:
+ cmd = ['netsh', 'interface', 'ip', 'add', 'dns',
+ 'name="%s"' % adapter_name, dns_server]
+ _execute(cmd)
+ else:
+ logging.warn(_("VMware Tools is not installed"))
+
+
+def _filter_duplicates(all_entries):
+ final_list = []
+ for entry in all_entries:
+ if entry and entry not in final_list:
+ final_list.append(entry)
+ return final_list
+
+
+def _set_rhel_networking(network_details=[]):
+ all_dns_servers = []
+ for network_detail in network_details:
+ mac_address, ip_address, subnet_mask, gateway, broadcast,\
+ dns_servers = network_detail
+ all_dns_servers.extend(dns_servers)
+ adapter_name, current_ip_address = \
+ _get_linux_adapter_name_and_ip_address(mac_address)
+ if adapter_name and not ip_address == current_ip_address:
+ interface_file_name = \
+ '/etc/sysconfig/network-scripts/ifcfg-%s' % adapter_name
+ # Remove file
+ os.remove(interface_file_name)
+ # Touch file
+ _execute(['touch', interface_file_name])
+ interface_file = open(interface_file_name, 'w')
+ interface_file.write('\nDEVICE=%s' % adapter_name)
+ interface_file.write('\nUSERCTL=yes')
+ interface_file.write('\nONBOOT=yes')
+ interface_file.write('\nBOOTPROTO=static')
+ interface_file.write('\nBROADCAST=%s' % broadcast)
+ interface_file.write('\nNETWORK=')
+ interface_file.write('\nGATEWAY=%s' % gateway)
+ interface_file.write('\nNETMASK=%s' % subnet_mask)
+ interface_file.write('\nIPADDR=%s' % ip_address)
+ interface_file.write('\nMACADDR=%s' % mac_address)
+ interface_file.close()
+ if all_dns_servers:
+ dns_file_name = "/etc/resolv.conf"
+ os.remove(dns_file_name)
+ _execute(['touch', dns_file_name])
+ dns_file = open(dns_file_name, 'w')
+ dns_file.write("; generated by OpenStack guest tools")
+ unique_entries = _filter_duplicates(all_dns_servers)
+ for dns_server in unique_entries:
+ dns_file.write("\nnameserver %s" % dns_server)
+ dns_file.close()
+ _execute(['/sbin/service', 'network', 'restart'])
+
+
+def _linux_set_networking():
+ """Set IP address for the Linux VM."""
+ vmware_tools_bin = None
+ if os.path.exists('/usr/sbin/vmtoolsd'):
+ vmware_tools_bin = '/usr/sbin/vmtoolsd'
+ elif os.path.exists('/usr/bin/vmtoolsd'):
+ vmware_tools_bin = '/usr/bin/vmtoolsd'
+ elif os.path.exists('/usr/sbin/vmware-guestd'):
+ vmware_tools_bin = '/usr/sbin/vmware-guestd'
+ elif os.path.exists('/usr/bin/vmware-guestd'):
+ vmware_tools_bin = '/usr/bin/vmware-guestd'
+ if vmware_tools_bin:
+ cmd = [vmware_tools_bin, '--cmd', 'machine.id.get']
+ network_details = _parse_network_details(_execute(cmd,
+ check_exit_code=False))
+ # TODO(sateesh): For other distros like ubuntu, suse, debian, BSD, etc.
+ _set_rhel_networking(network_details)
+ else:
+ logging.warn(_("VMware Tools is not installed"))
+
+if __name__ == '__main__':
+ pltfrm = sys.platform
+ if pltfrm == PLATFORM_WIN:
+ _windows_set_networking()
+ elif pltfrm == PLATFORM_LINUX:
+ _linux_set_networking()
+ else:
+ raise NotImplementedError(_("Platform not implemented: '%s'") % pltfrm)
diff --git a/tools/euca-get-ajax-console b/tools/euca-get-ajax-console
index 37060e74f..a67c79d90 100755
--- a/tools/euca-get-ajax-console
+++ b/tools/euca-get-ajax-console
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -35,7 +35,8 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
import boto
import nova
from boto.ec2.connection import EC2Connection
-from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
+import euca2ools
+from euca2ools import Euca2ool, InstanceValidationError, Util
usage_string = """
Retrieves a url to an ajax console terminal
@@ -93,8 +94,13 @@ def override_connect_ec2(aws_access_key_id=None,
aws_secret_access_key, **kwargs)
# override boto's connect_ec2 method, so that we can use NovaEC2Connection
+# (This is for Euca2ools 1.2)
boto.connect_ec2 = override_connect_ec2
+# Override Euca2ools' EC2Connection class (which it gets from boto)
+# (This is for Euca2ools 1.3)
+euca2ools.EC2Connection = NovaEC2Connection
+
def usage(status=1):
print usage_string
@@ -147,7 +153,7 @@ def main():
try:
euca_conn = euca.make_connection()
- except ConnectionFailed, e:
+ except Exception, e:
print e.message
sys.exit(1)
try:
diff --git a/tools/pip-requires b/tools/pip-requires
index 3587df644..6ea446493 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -10,6 +10,7 @@ boto==1.9b
carrot==0.10.5
eventlet==0.9.12
lockfile==0.8
+python-novaclient==2.3
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
@@ -29,3 +30,5 @@ sqlalchemy-migrate
netaddr
sphinx
glance
+nova-adminclient
+suds==0.4