author     Ben Swartzlander <bswartz@netapp.com>   2012-09-01 23:39:39 -0400
committer  Ben Swartzlander <bswartz@netapp.com>   2012-09-12 12:43:44 -0400
commit     772c5d47d5bdffcd4ff8e09f4116d22568bf6eb9 (patch)
tree       c2a11bc6bb2dbe9e78e1f6f603e86042a5bb5087 /nova
parent     76d094eeba1bcbba16d24e40aea24bb7729b4a30 (diff)
Backport changes from Cinder to Nova-Volume
NetApp C-mode driver.
Generic NFS-based block device driver.
NetApp NFS-based block device driver.

blueprint netapp-volume-driver-cmode
blueprint nfs-files-as-virtual-block-devices
blueprint netapp-nfs-cinder-driver
bug 1037619
bug 1037622

Change-Id: I513c3f88bcb03f3b71a453f92f5912d7730a8bbc
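
A minimal configuration sketch for enabling the new generic NFS backend
(the flag names are those used by the new driver and its tests; the driver
setting, file path, and values below are illustrative examples, not part of
this change):

    volume_driver = nova.volume.nfs.NfsDriver
    nfs_shares_config = /etc/nova/nfs_shares.conf
    nfs_mount_point_base = /mnt/nova-nfs
    nfs_disk_util = df
    nfs_sparsed_volumes = True
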
Diffstat (limited to 'nova')
-rw-r--r--   nova/exception.py               12
-rw-r--r--   nova/tests/test_netapp.py      389
-rw-r--r--   nova/tests/test_netapp_nfs.py  261
-rw-r--r--   nova/tests/test_nfs.py         629
-rw-r--r--   nova/volume/netapp.py          294
-rw-r--r--   nova/volume/netapp_nfs.py      267
-rw-r--r--   nova/volume/nfs.py             293
7 files changed, 2145 insertions, 0 deletions
diff --git a/nova/exception.py b/nova/exception.py
index cd1eabc9d..36d8f051c 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1021,6 +1021,18 @@ class VolumeBackendAPIException(NovaException):
"backend API: data=%(data)s")
+class NfsException(NovaException):
+ message = _("Unknown NFS exception")
+
+
+class NfsNoSharesMounted(NotFound):
+ message = _("No mounted NFS shares found")
+
+
+class NfsNoSuitableShareFound(NotFound):
+ message = _("There is no share which can host %(volume_size)sG")
+
+
class InstanceTypeCreateFailed(NovaException):
message = _("Unable to create instance type")
diff --git a/nova/tests/test_netapp.py b/nova/tests/test_netapp.py
index 1fd95308d..79a8526ee 100644
--- a/nova/tests/test_netapp.py
+++ b/nova/tests/test_netapp.py
@@ -989,3 +989,392 @@ class NetAppDriverTestCase(test.TestCase):
properties = connection_info['data']
self.driver.terminate_connection(volume, connector)
self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID)
+
+
+WSDL_HEADER_CMODE = """<?xml version="1.0" encoding="UTF-8"?>
+<definitions xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
+ xmlns:na="http://cloud.netapp.com/"
+xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+xmlns="http://schemas.xmlsoap.org/wsdl/"
+targetNamespace="http://cloud.netapp.com/" name="CloudStorageService">
+"""
+
+WSDL_TYPES_CMODE = """<types>
+<xs:schema xmlns:na="http://cloud.netapp.com/"
+xmlns:xs="http://www.w3.org/2001/XMLSchema" version="1.0"
+targetNamespace="http://cloud.netapp.com/">
+
+ <xs:element name="ProvisionLun">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Name" type="xs:string"/>
+ <xs:element name="Size" type="xsd:long"/>
+ <xs:element name="Metadata" type="na:Metadata" minOccurs="0"
+ maxOccurs="unbounded"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="ProvisionLunResult">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Lun" type="na:Lun"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="DestroyLun">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Handle" type="xsd:string"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="DestroyLunResult">
+ <xs:complexType>
+ <xs:all/>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="CloneLun">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Handle" type="xsd:string"/>
+ <xs:element name="NewName" type="xsd:string"/>
+ <xs:element name="Metadata" type="na:Metadata" minOccurs="0"
+ maxOccurs="unbounded"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="CloneLunResult">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Lun" type="na:Lun"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="MapLun">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Handle" type="xsd:string"/>
+ <xs:element name="InitiatorType" type="xsd:string"/>
+ <xs:element name="InitiatorName" type="xsd:string"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="MapLunResult">
+ <xs:complexType>
+ <xs:all/>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="UnmapLun">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Handle" type="xsd:string"/>
+ <xs:element name="InitiatorType" type="xsd:string"/>
+ <xs:element name="InitiatorName" type="xsd:string"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="UnmapLunResult">
+ <xs:complexType>
+ <xs:all/>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="ListLuns">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="NameFilter" type="xsd:string" minOccurs="0"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="ListLunsResult">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Lun" type="na:Lun" minOccurs="0"
+ maxOccurs="unbounded"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="GetLunTargetDetails">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="Handle" type="xsd:string"/>
+ <xs:element name="InitiatorType" type="xsd:string"/>
+ <xs:element name="InitiatorName" type="xsd:string"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="GetLunTargetDetailsResult">
+ <xs:complexType>
+ <xs:all>
+ <xs:element name="TargetDetails" type="na:TargetDetails"
+ minOccurs="0" maxOccurs="unbounded"/>
+ </xs:all>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:complexType name="Metadata">
+ <xs:sequence>
+ <xs:element name="Key" type="xs:string"/>
+ <xs:element name="Value" type="xs:string"/>
+ </xs:sequence>
+ </xs:complexType>
+
+ <xs:complexType name="Lun">
+ <xs:sequence>
+ <xs:element name="Name" type="xs:string"/>
+ <xs:element name="Size" type="xs:long"/>
+ <xs:element name="Handle" type="xs:string"/>
+ <xs:element name="Metadata" type="na:Metadata" minOccurs="0"
+ maxOccurs="unbounded"/>
+ </xs:sequence>
+ </xs:complexType>
+
+ <xs:complexType name="TargetDetails">
+ <xs:sequence>
+ <xs:element name="Address" type="xs:string"/>
+ <xs:element name="Port" type="xs:int"/>
+ <xs:element name="Portal" type="xs:int"/>
+ <xs:element name="Iqn" type="xs:string"/>
+ <xs:element name="LunNumber" type="xs:int"/>
+ </xs:sequence>
+ </xs:complexType>
+
+ </xs:schema></types>"""
+
+WSDL_TRAILER_CMODE = """<service name="CloudStorageService">
+ <port name="CloudStoragePort" binding="na:CloudStorageBinding">
+ <soap:address location="http://hostname:8080/ws/ntapcloud"/>
+ </port>
+ </service>
+</definitions>"""
+
+RESPONSE_PREFIX_CMODE = """<?xml version='1.0' encoding='UTF-8'?>
+<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
+<soapenv:Body>"""
+
+RESPONSE_SUFFIX_CMODE = """</soapenv:Body></soapenv:Envelope>"""
+
+CMODE_APIS = ['ProvisionLun', 'DestroyLun', 'CloneLun', 'MapLun', 'UnmapLun',
+ 'ListLuns', 'GetLunTargetDetails']
+
+
+class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """HTTP handler that fakes enough stuff to allow the driver to run"""
+
+ def do_GET(s):
+ """Respond to a GET request."""
+ if '/ntap_cloud.wsdl' != s.path:
+ s.send_response(404)
+            s.end_headers()
+ return
+ s.send_response(200)
+ s.send_header("Content-Type", "application/wsdl+xml")
+ s.end_headers()
+ out = s.wfile
+ out.write(WSDL_HEADER_CMODE)
+ out.write(WSDL_TYPES_CMODE)
+ for api in CMODE_APIS:
+ out.write('<message name="%sRequest">' % api)
+ out.write('<part element="na:%s" name="req"/>' % api)
+ out.write('</message>')
+ out.write('<message name="%sResponse">' % api)
+ out.write('<part element="na:%sResult" name="res"/>' % api)
+ out.write('</message>')
+ out.write('<portType name="CloudStorage">')
+ for api in CMODE_APIS:
+ out.write('<operation name="%s">' % api)
+ out.write('<input message="na:%sRequest"/>' % api)
+ out.write('<output message="na:%sResponse"/>' % api)
+ out.write('</operation>')
+ out.write('</portType>')
+ out.write('<binding name="CloudStorageBinding" '
+ 'type="na:CloudStorage">')
+ out.write('<soap:binding style="document" ' +
+ 'transport="http://schemas.xmlsoap.org/soap/http"/>')
+ for api in CMODE_APIS:
+ out.write('<operation name="%s">' % api)
+ out.write('<soap:operation soapAction=""/>')
+ out.write('<input><soap:body use="literal"/></input>')
+ out.write('<output><soap:body use="literal"/></output>')
+ out.write('</operation>')
+ out.write('</binding>')
+ out.write(WSDL_TRAILER_CMODE)
+
+ def do_POST(s):
+ """Respond to a POST request."""
+ if '/ws/ntapcloud' != s.path:
+ s.send_response(404)
+            s.end_headers()
+ return
+ request_xml = s.rfile.read(int(s.headers['Content-Length']))
+ ntap_ns = 'http://cloud.netapp.com/'
+ nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
+ 'na': ntap_ns}
+ root = etree.fromstring(request_xml)
+
+ body = root.xpath('/soapenv:Envelope/soapenv:Body',
+ namespaces=nsmap)[0]
+ request = body.getchildren()[0]
+ tag = request.tag
+ if not tag.startswith('{' + ntap_ns + '}'):
+ s.send_response(500)
+            s.end_headers()
+ return
+ api = tag[(2 + len(ntap_ns)):]
+ if 'ProvisionLun' == api:
+ body = """<ns:ProvisionLunResult xmlns:ns=
+ "http://cloud.netapp.com/">
+ <Lun><Name>lun1</Name><Size>20</Size>
+ <Handle>1d9c006c-a406-42f6-a23f-5ed7a6dc33e3</Handle>
+ <Metadata><Key>OsType</Key>
+ <Value>linux</Value></Metadata></Lun>
+ </ns:ProvisionLunResult>"""
+ elif 'DestroyLun' == api:
+ body = """<ns:DestroyLunResult xmlns:ns="http://cloud.netapp.com/"
+ />"""
+ elif 'CloneLun' == api:
+ body = """<ns:CloneLunResult xmlns:ns="http://cloud.netapp.com/">
+ <Lun><Name>lun2</Name><Size>2</Size>
+ <Handle>98ea1791d228453899d422b4611642c3</Handle>
+ <Metadata><Key>OsType</Key>
+ <Value>linux</Value></Metadata>
+ </Lun></ns:CloneLunResult>"""
+        elif 'MapLun' == api:
+            body = """<ns:MapLunResult xmlns:ns="http://cloud.netapp.com/"
+            />"""
+        elif 'UnmapLun' == api:
+            body = """<ns:UnmapLunResult xmlns:ns="http://cloud.netapp.com/"
+            />"""
+ elif 'ListLuns' == api:
+ body = """<ns:ListLunsResult xmlns:ns="http://cloud.netapp.com/">
+ <Lun>
+ <Name>lun1</Name>
+ <Size>20</Size>
+ <Handle>asdjdnsd</Handle>
+ </Lun>
+ </ns:ListLunsResult>"""
+ elif 'GetLunTargetDetails' == api:
+ body = """<ns:GetLunTargetDetailsResult
+ xmlns:ns="http://cloud.netapp.com/">
+            <TargetDetails>
+ <Address>1.2.3.4</Address>
+ <Port>3260</Port>
+ <Portal>1000</Portal>
+ <Iqn>iqn.199208.com.netapp:sn.123456789</Iqn>
+ <LunNumber>0</LunNumber>
+            </TargetDetails>
+ </ns:GetLunTargetDetailsResult>"""
+ else:
+ # Unknown API
+ s.send_response(500)
+            s.end_headers()
+ return
+ s.send_response(200)
+ s.send_header("Content-Type", "text/xml; charset=utf-8")
+ s.end_headers()
+ s.wfile.write(RESPONSE_PREFIX_CMODE)
+ s.wfile.write(body)
+ s.wfile.write(RESPONSE_SUFFIX_CMODE)
+
+
+class FakeCmodeHTTPConnection(object):
+ """A fake httplib.HTTPConnection for netapp tests
+
+ Requests made via this connection actually get translated and routed into
+    the fake C-mode handler above; we then turn the response into
+ the httplib.HTTPResponse that the caller expects.
+ """
+ def __init__(self, host, timeout=None):
+ self.host = host
+
+ def request(self, method, path, data=None, headers=None):
+ if not headers:
+ headers = {}
+ req_str = '%s %s HTTP/1.1\r\n' % (method, path)
+ for key, value in headers.iteritems():
+ req_str += "%s: %s\r\n" % (key, value)
+ if data:
+ req_str += '\r\n%s' % data
+
+        # NOTE(vish): normally the http transport normalizes from unicode
+ sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
+ # NOTE(vish): stop the server from trying to look up address from
+ # the fake socket
+ FakeCMODEServerHandler.address_string = lambda x: '127.0.0.1'
+ self.app = FakeCMODEServerHandler(sock, '127.0.0.1:8080', None)
+
+ self.sock = FakeHttplibSocket(sock.result)
+ self.http_response = httplib.HTTPResponse(self.sock)
+
+ def set_debuglevel(self, level):
+ pass
+
+ def getresponse(self):
+ self.http_response.begin()
+ return self.http_response
+
+ def getresponsebody(self):
+ return self.sock.result
+
+
+class NetAppCmodeISCSIDriverTestCase(test.TestCase):
+ """Test case for NetAppISCSIDriver"""
+ volume = {
+ 'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None
+ }
+ snapshot = {
+ 'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
+ 'volume_size': 1, 'project_id': 'project'
+ }
+ volume_sec = {
+ 'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None
+ }
+
+ def setUp(self):
+ super(NetAppCmodeISCSIDriverTestCase, self).setUp()
+ driver = netapp.NetAppCmodeISCSIDriver()
+ self.stubs.Set(httplib, 'HTTPConnection', FakeCmodeHTTPConnection)
+ driver._create_client(wsdl_url='http://localhost:8080/ntap_cloud.wsdl',
+ login='root', password='password',
+ hostname='localhost', port=8080, cache=False)
+ self.driver = driver
+
+ def test_connect(self):
+ self.driver.check_for_setup_error()
+
+ def test_create_destroy(self):
+ self.driver.create_volume(self.volume)
+ self.driver.delete_volume(self.volume)
+
+ def test_create_vol_snapshot_destroy(self):
+ self.driver.create_volume(self.volume)
+ self.driver.create_snapshot(self.snapshot)
+ self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot)
+ self.driver.delete_snapshot(self.snapshot)
+ self.driver.delete_volume(self.volume)
+
+ def test_map_unmap(self):
+ self.driver.create_volume(self.volume)
+ updates = self.driver.create_export(None, self.volume)
+ self.assertTrue(updates['provider_location'])
+ self.volume['provider_location'] = updates['provider_location']
+ connector = {'initiator': 'init1'}
+ connection_info = self.driver.initialize_connection(self.volume,
+ connector)
+ self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
+ properties = connection_info['data']
+ self.driver.terminate_connection(self.volume, connector)
+ self.driver.delete_volume(self.volume)
diff --git a/nova/tests/test_netapp_nfs.py b/nova/tests/test_netapp_nfs.py
new file mode 100644
index 000000000..2a0b4ffde
--- /dev/null
+++ b/nova/tests/test_netapp_nfs.py
@@ -0,0 +1,261 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
+
+from nova import context
+from nova import exception
+from nova import test
+
+from nova.volume import netapp
+from nova.volume import netapp_nfs
+from nova.volume import nfs
+
+from mox import IgnoreArg
+from mox import IsA
+from mox import MockObject
+
+import mox
+import suds
+import types
+
+
+class FakeVolume(object):
+ def __init__(self, size=0):
+ self.size = size
+ self.id = hash(self)
+ self.name = None
+
+ def __getitem__(self, key):
+ return self.__dict__[key]
+
+
+class FakeSnapshot(object):
+ def __init__(self, volume_size=0):
+ self.volume_name = None
+ self.name = None
+ self.volume_id = None
+ self.volume_size = volume_size
+ self.user_id = None
+ self.status = None
+
+ def __getitem__(self, key):
+ return self.__dict__[key]
+
+
+class FakeResponse(object):
+ def __init__(self, status):
+ """
+ :param status: Either 'failed' or 'passed'
+ """
+ self.Status = status
+
+ if status == 'failed':
+ self.Reason = 'Sample error'
+
+
+class NetappNfsDriverTestCase(test.TestCase):
+ """Test case for NetApp specific NFS clone driver"""
+
+ def setUp(self):
+ self._driver = netapp_nfs.NetAppNFSDriver()
+ self._mox = mox.Mox()
+
+ def tearDown(self):
+ self._mox.UnsetStubs()
+
+ def test_check_for_setup_error(self):
+ mox = self._mox
+ drv = self._driver
+ required_flags = [
+ 'netapp_wsdl_url',
+ 'netapp_login',
+ 'netapp_password',
+ 'netapp_server_hostname',
+ 'netapp_server_port'
+ ]
+
+ # check exception raises when flags are not set
+ self.assertRaises(exception.NovaException,
+ drv.check_for_setup_error)
+
+ # set required flags
+ for flag in required_flags:
+ setattr(netapp.FLAGS, flag, 'val')
+
+ mox.StubOutWithMock(nfs.NfsDriver, 'check_for_setup_error')
+ nfs.NfsDriver.check_for_setup_error()
+ mox.ReplayAll()
+
+ drv.check_for_setup_error()
+
+ mox.VerifyAll()
+
+ # restore initial FLAGS
+ for flag in required_flags:
+ delattr(netapp.FLAGS, flag)
+
+ def test_do_setup(self):
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, 'check_for_setup_error')
+ mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, '_get_client')
+
+ drv.check_for_setup_error()
+ netapp_nfs.NetAppNFSDriver._get_client()
+
+ mox.ReplayAll()
+
+ drv.do_setup(IsA(context.RequestContext))
+
+ mox.VerifyAll()
+
+ def test_create_snapshot(self):
+ """Test snapshot can be created and deleted"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_clone_volume')
+ drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
+ mox.ReplayAll()
+
+ drv.create_snapshot(FakeSnapshot())
+
+ mox.VerifyAll()
+
+ def test_create_volume_from_snapshot(self):
+ """Tests volume creation from snapshot"""
+ drv = self._driver
+ mox = self._mox
+ volume = FakeVolume(1)
+ snapshot = FakeSnapshot(2)
+
+ self.assertRaises(exception.NovaException,
+ drv.create_volume_from_snapshot,
+ volume,
+ snapshot)
+
+ snapshot = FakeSnapshot(1)
+
+ location = '127.0.0.1:/nfs'
+ expected_result = {'provider_location': location}
+ mox.StubOutWithMock(drv, '_clone_volume')
+ mox.StubOutWithMock(drv, '_get_volume_location')
+ drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
+ drv._get_volume_location(IgnoreArg()).AndReturn(location)
+
+ mox.ReplayAll()
+
+ loc = drv.create_volume_from_snapshot(volume, snapshot)
+
+ self.assertEquals(loc, expected_result)
+
+ mox.VerifyAll()
+
+ def _prepare_delete_snapshot_mock(self, snapshot_exists):
+ drv = self._driver
+ mox = self._mox
+
+ mox.StubOutWithMock(drv, '_get_provider_location')
+ mox.StubOutWithMock(drv, '_volume_not_present')
+
+ if snapshot_exists:
+ mox.StubOutWithMock(drv, '_execute')
+ mox.StubOutWithMock(drv, '_get_volume_path')
+
+ drv._get_provider_location(IgnoreArg())
+ drv._volume_not_present(IgnoreArg(), IgnoreArg())\
+ .AndReturn(not snapshot_exists)
+
+ if snapshot_exists:
+ drv._get_volume_path(IgnoreArg(), IgnoreArg())
+ drv._execute('rm', None, run_as_root=True)
+
+ mox.ReplayAll()
+
+ return mox
+
+ def test_delete_existing_snapshot(self):
+ drv = self._driver
+ mox = self._prepare_delete_snapshot_mock(True)
+
+ drv.delete_snapshot(FakeSnapshot())
+
+ mox.VerifyAll()
+
+ def test_delete_missing_snapshot(self):
+ drv = self._driver
+ mox = self._prepare_delete_snapshot_mock(False)
+
+ drv.delete_snapshot(FakeSnapshot())
+
+ mox.VerifyAll()
+
+ def _prepare_clone_mock(self, status):
+ drv = self._driver
+ mox = self._mox
+
+ volume = FakeVolume()
+ setattr(volume, 'provider_location', '127.0.0.1:/nfs')
+
+ drv._client = MockObject(suds.client.Client)
+ drv._client.factory = MockObject(suds.client.Factory)
+ drv._client.service = MockObject(suds.client.ServiceSelector)
+
+ # ApiProxy() method is generated by ServiceSelector at runtime from the
+ # XML, so mocking is impossible.
+ setattr(drv._client.service,
+ 'ApiProxy',
+                types.MethodType(lambda *args, **kwargs: FakeResponse(status),
+ suds.client.ServiceSelector))
+ mox.StubOutWithMock(drv, '_get_host_id')
+ mox.StubOutWithMock(drv, '_get_full_export_path')
+
+ drv._get_host_id(IgnoreArg()).AndReturn('10')
+ drv._get_full_export_path(IgnoreArg(), IgnoreArg()).AndReturn('/nfs')
+
+ return mox
+
+    def test_successful_clone_volume(self):
+ drv = self._driver
+ mox = self._prepare_clone_mock('passed')
+
+ mox.ReplayAll()
+
+ volume_name = 'volume_name'
+ clone_name = 'clone_name'
+ volume_id = volume_name + str(hash(volume_name))
+
+ drv._clone_volume(volume_name, clone_name, volume_id)
+
+ mox.VerifyAll()
+
+ def test_failed_clone_volume(self):
+ drv = self._driver
+ mox = self._prepare_clone_mock('failed')
+
+ mox.ReplayAll()
+
+ volume_name = 'volume_name'
+ clone_name = 'clone_name'
+ volume_id = volume_name + str(hash(volume_name))
+
+ self.assertRaises(exception.NovaException,
+ drv._clone_volume,
+ volume_name, clone_name, volume_id)
+
+ mox.VerifyAll()
diff --git a/nova/tests/test_nfs.py b/nova/tests/test_nfs.py
new file mode 100644
index 000000000..8a931b081
--- /dev/null
+++ b/nova/tests/test_nfs.py
@@ -0,0 +1,629 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Unit tests for the NFS driver module"""
+
+import __builtin__
+import errno
+import os
+
+import mox as mox_lib
+from mox import IgnoreArg
+from mox import IsA
+from mox import stubout
+
+from nova import context
+from nova import exception
+from nova.exception import ProcessExecutionError
+from nova import test
+
+from nova.volume import nfs
+
+
+class DumbVolume(object):
+ fields = {}
+
+ def __setitem__(self, key, value):
+ self.fields[key] = value
+
+ def __getitem__(self, item):
+ return self.fields[item]
+
+
+class NfsDriverTestCase(test.TestCase):
+ """Test case for NFS driver"""
+
+ TEST_NFS_EXPORT1 = 'nfs-host1:/export'
+ TEST_NFS_EXPORT2 = 'nfs-host2:/export'
+ TEST_SIZE_IN_GB = 1
+ TEST_MNT_POINT = '/mnt/nfs'
+ TEST_MNT_POINT_BASE = '/mnt/test'
+ TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
+ TEST_FILE_NAME = 'test.txt'
+ TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
+ ONE_GB_IN_BYTES = 1024 * 1024 * 1024
+
+ def setUp(self):
+ self._driver = nfs.NfsDriver()
+ self._mox = mox_lib.Mox()
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self._mox.UnsetStubs()
+ self.stubs.UnsetAll()
+
+ def stub_out_not_replaying(self, obj, attr_name):
+ attr_to_replace = getattr(obj, attr_name)
+ stub = mox_lib.MockObject(attr_to_replace)
+ self.stubs.Set(obj, attr_name, stub)
+
+ def test_path_exists_should_return_true(self):
+ """_path_exists should return True if stat returns 0"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True)
+
+ mox.ReplayAll()
+
+ self.assertTrue(drv._path_exists(self.TEST_FILE_NAME))
+
+ mox.VerifyAll()
+
+ def test_path_exists_should_return_false(self):
+ """_path_exists should return True if stat doesn't return 0"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True).\
+ AndRaise(ProcessExecutionError(
+ stderr="stat: cannot stat `test.txt': No such file or directory"))
+
+ mox.ReplayAll()
+
+ self.assertFalse(drv._path_exists(self.TEST_FILE_NAME))
+
+ mox.VerifyAll()
+
+ def test_local_path(self):
+ """local_path common use case"""
+ nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
+ drv = self._driver
+
+ volume = DumbVolume()
+ volume['provider_location'] = self.TEST_NFS_EXPORT1
+ volume['name'] = 'volume-123'
+
+ self.assertEqual('/mnt/test/12118957640568004265/volume-123',
+ drv.local_path(volume))
+
+ def test_mount_nfs_should_mount_correctly(self):
+ """_mount_nfs common case usage"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_path_exists')
+ drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
+ self.TEST_MNT_POINT, run_as_root=True)
+
+ mox.ReplayAll()
+
+ drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT)
+
+ mox.VerifyAll()
+
+ def test_mount_nfs_should_suppress_already_mounted_error(self):
+ """_mount_nfs should suppress already mounted error if ensure=True
+ """
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_path_exists')
+ drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
+ self.TEST_MNT_POINT, run_as_root=True).\
+ AndRaise(ProcessExecutionError(
+ stderr='is busy or already mounted'))
+
+ mox.ReplayAll()
+
+ drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True)
+
+ mox.VerifyAll()
+
+ def test_mount_nfs_should_reraise_already_mounted_error(self):
+ """_mount_nfs should not suppress already mounted error if ensure=False
+ """
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_path_exists')
+ drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
+ self.TEST_MNT_POINT, run_as_root=True).\
+ AndRaise(ProcessExecutionError(stderr='is busy or already mounted'))
+
+ mox.ReplayAll()
+
+ self.assertRaises(ProcessExecutionError, drv._mount_nfs,
+ self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
+ ensure=False)
+
+ mox.VerifyAll()
+
+ def test_mount_nfs_should_create_mountpoint_if_not_yet(self):
+ """_mount_nfs should create mountpoint if it doesn't exist"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_path_exists')
+ drv._path_exists(self.TEST_MNT_POINT).AndReturn(False)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
+ drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg())
+
+ mox.ReplayAll()
+
+ drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT)
+
+ mox.VerifyAll()
+
+ def test_mount_nfs_should_not_create_mountpoint_if_already(self):
+ """_mount_nfs should not create mountpoint if it already exists"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_path_exists')
+ drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg())
+
+ mox.ReplayAll()
+
+ drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT)
+
+ mox.VerifyAll()
+
+ def test_get_hash_str(self):
+ """_get_hash_str should calculation correct value"""
+ drv = self._driver
+
+ self.assertEqual('12118957640568004265',
+ drv._get_hash_str(self.TEST_NFS_EXPORT1))
+
+ def test_get_mount_point_for_share(self):
+ """_get_mount_point_for_share should calculate correct value"""
+ drv = self._driver
+
+ nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
+
+ self.assertEqual('/mnt/test/12118957640568004265',
+ drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
+
+ def test_get_available_capacity_with_df(self):
+ """_get_available_capacity should calculate correct value"""
+ mox = self._mox
+ drv = self._driver
+
+ df_avail = 1490560
+ df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n'
+ df_data = 'nfs-host:/export 2620544 996864 %d 41%% /mnt' % df_avail
+ df_output = df_head + df_data
+
+ setattr(nfs.FLAGS, 'nfs_disk_util', 'df')
+
+ mox.StubOutWithMock(drv, '_get_mount_point_for_share')
+ drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\
+ AndReturn(self.TEST_MNT_POINT)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT,
+ run_as_root=True).AndReturn((df_output, None))
+
+ mox.ReplayAll()
+
+ self.assertEquals(df_avail,
+ drv._get_available_capacity(self.TEST_NFS_EXPORT1))
+
+ mox.VerifyAll()
+
+ delattr(nfs.FLAGS, 'nfs_disk_util')
+
+ def test_get_available_capacity_with_du(self):
+ """_get_available_capacity should calculate correct value"""
+ mox = self._mox
+ drv = self._driver
+
+ setattr(nfs.FLAGS, 'nfs_disk_util', 'du')
+
+ df_total_size = 2620544
+ df_used_size = 996864
+ df_avail_size = 1490560
+ df_title = 'Filesystem 1-blocks Used Available Use% Mounted on\n'
+ df_mnt_data = 'nfs-host:/export %d %d %d 41%% /mnt' % (df_total_size,
+ df_used_size,
+ df_avail_size)
+ df_output = df_title + df_mnt_data
+
+ du_used = 490560
+ du_output = '%d /mnt' % du_used
+
+ mox.StubOutWithMock(drv, '_get_mount_point_for_share')
+ drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\
+ AndReturn(self.TEST_MNT_POINT)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT,
+ run_as_root=True).\
+ AndReturn((df_output, None))
+ drv._execute('du', '-sb', '--apparent-size',
+ '--exclude', '*snapshot*',
+ self.TEST_MNT_POINT,
+ run_as_root=True).AndReturn((du_output, None))
+
+ mox.ReplayAll()
+
+ self.assertEquals(df_total_size - du_used,
+ drv._get_available_capacity(self.TEST_NFS_EXPORT1))
+
+ mox.VerifyAll()
+
+ delattr(nfs.FLAGS, 'nfs_disk_util')
+
+ def test_load_shares_config(self):
+ mox = self._mox
+ drv = self._driver
+
+ nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+
+ mox.StubOutWithMock(__builtin__, 'open')
+ config_data = []
+ config_data.append(self.TEST_NFS_EXPORT1)
+ config_data.append('#' + self.TEST_NFS_EXPORT2)
+ config_data.append('')
+ __builtin__.open(self.TEST_SHARES_CONFIG_FILE).AndReturn(config_data)
+ mox.ReplayAll()
+
+ shares = drv._load_shares_config()
+
+ self.assertEqual([self.TEST_NFS_EXPORT1], shares)
+
+ mox.VerifyAll()
+
+ def test_ensure_share_mounted(self):
+ """_ensure_share_mounted simple use case"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_get_mount_point_for_share')
+ drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\
+ AndReturn(self.TEST_MNT_POINT)
+
+ mox.StubOutWithMock(drv, '_mount_nfs')
+ drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True)
+
+ mox.ReplayAll()
+
+ drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
+
+ mox.VerifyAll()
+
+ def test_ensure_shares_mounted_should_save_mounting_successfully(self):
+ """_ensure_shares_mounted should save share if mounted with success"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_load_shares_config')
+ drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1])
+ mox.StubOutWithMock(drv, '_ensure_share_mounted')
+ drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
+
+ mox.ReplayAll()
+
+ drv._ensure_shares_mounted()
+
+ self.assertEqual(1, len(drv._mounted_shares))
+ self.assertEqual(self.TEST_NFS_EXPORT1, drv._mounted_shares[0])
+
+ mox.VerifyAll()
+
+ def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
+ """_ensure_shares_mounted should not save share if failed to mount"""
+ mox = self._mox
+ drv = self._driver
+
+ mox.StubOutWithMock(drv, '_load_shares_config')
+ drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1])
+ mox.StubOutWithMock(drv, '_ensure_share_mounted')
+ drv._ensure_share_mounted(self.TEST_NFS_EXPORT1).AndRaise(Exception())
+
+ mox.ReplayAll()
+
+ drv._ensure_shares_mounted()
+
+ self.assertEqual(0, len(drv._mounted_shares))
+
+ mox.VerifyAll()
+
+ def test_setup_should_throw_error_if_shares_config_not_configured(self):
+ """do_setup should throw error if shares config is not configured """
+ drv = self._driver
+
+ nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+
+ self.assertRaises(exception.NfsException,
+ drv.do_setup, IsA(context.RequestContext))
+
+ def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
+ """do_setup should throw error if nfs client is not installed """
+ mox = self._mox
+ drv = self._driver
+
+ nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+
+ mox.StubOutWithMock(os.path, 'exists')
+ os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('mount.nfs', check_exit_code=False).\
+ AndRaise(OSError(errno.ENOENT, 'No such file or directory'))
+
+ mox.ReplayAll()
+
+ self.assertRaises(exception.NfsException,
+ drv.do_setup, IsA(context.RequestContext))
+
+ mox.VerifyAll()
+
+ def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
+ """_find_share should throw error if there is no mounted shares"""
+ drv = self._driver
+
+ drv._mounted_shares = []
+
+ self.assertRaises(exception.NotFound, drv._find_share,
+ self.TEST_SIZE_IN_GB)
+
+ def test_find_share(self):
+ """_find_share simple use case"""
+ mox = self._mox
+ drv = self._driver
+
+ drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
+
+ mox.StubOutWithMock(drv, '_get_available_capacity')
+ drv._get_available_capacity(self.TEST_NFS_EXPORT1).\
+ AndReturn(2 * self.ONE_GB_IN_BYTES)
+ drv._get_available_capacity(self.TEST_NFS_EXPORT2).\
+ AndReturn(3 * self.ONE_GB_IN_BYTES)
+
+ mox.ReplayAll()
+
+ self.assertEqual(self.TEST_NFS_EXPORT2,
+ drv._find_share(self.TEST_SIZE_IN_GB))
+
+ mox.VerifyAll()
+
+ def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
+ """_find_share should throw error if there is no share to host vol"""
+ mox = self._mox
+ drv = self._driver
+
+ drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
+
+ mox.StubOutWithMock(drv, '_get_available_capacity')
+ drv._get_available_capacity(self.TEST_NFS_EXPORT1).\
+ AndReturn(0)
+ drv._get_available_capacity(self.TEST_NFS_EXPORT2).\
+ AndReturn(0)
+
+ mox.ReplayAll()
+
+ self.assertRaises(exception.NfsNoSuitableShareFound, drv._find_share,
+ self.TEST_SIZE_IN_GB)
+
+ mox.VerifyAll()
+
+ def _simple_volume(self):
+ volume = DumbVolume()
+ volume['provider_location'] = '127.0.0.1:/mnt'
+ volume['name'] = 'volume_name'
+ volume['size'] = 10
+
+ return volume
+
+ def test_create_sparsed_volume(self):
+ mox = self._mox
+ drv = self._driver
+ volume = self._simple_volume()
+
+ setattr(nfs.FLAGS, 'nfs_sparsed_volumes', True)
+
+ mox.StubOutWithMock(drv, '_create_sparsed_file')
+ mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
+
+ drv._create_sparsed_file(IgnoreArg(), IgnoreArg())
+ drv._set_rw_permissions_for_all(IgnoreArg())
+
+ mox.ReplayAll()
+
+ drv._do_create_volume(volume)
+
+ mox.VerifyAll()
+
+ delattr(nfs.FLAGS, 'nfs_sparsed_volumes')
+
+ def test_create_nonsparsed_volume(self):
+ mox = self._mox
+ drv = self._driver
+ volume = self._simple_volume()
+
+ setattr(nfs.FLAGS, 'nfs_sparsed_volumes', False)
+
+ mox.StubOutWithMock(drv, '_create_regular_file')
+ mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
+
+ drv._create_regular_file(IgnoreArg(), IgnoreArg())
+ drv._set_rw_permissions_for_all(IgnoreArg())
+
+ mox.ReplayAll()
+
+ drv._do_create_volume(volume)
+
+ mox.VerifyAll()
+
+ delattr(nfs.FLAGS, 'nfs_sparsed_volumes')
+
+ def test_create_volume_should_ensure_nfs_mounted(self):
+ """create_volume should ensure shares provided in config are mounted"""
+ mox = self._mox
+ drv = self._driver
+
+ self.stub_out_not_replaying(nfs, 'LOG')
+ self.stub_out_not_replaying(drv, '_find_share')
+ self.stub_out_not_replaying(drv, '_do_create_volume')
+
+ mox.StubOutWithMock(drv, '_ensure_shares_mounted')
+ drv._ensure_shares_mounted()
+
+ mox.ReplayAll()
+
+ volume = DumbVolume()
+ volume['size'] = self.TEST_SIZE_IN_GB
+ drv.create_volume(volume)
+
+ mox.VerifyAll()
+
+ def test_create_volume_should_return_provider_location(self):
+ """create_volume should return provider_location with found share """
+ mox = self._mox
+ drv = self._driver
+
+ self.stub_out_not_replaying(nfs, 'LOG')
+ self.stub_out_not_replaying(drv, '_ensure_shares_mounted')
+ self.stub_out_not_replaying(drv, '_do_create_volume')
+
+ mox.StubOutWithMock(drv, '_find_share')
+ drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_NFS_EXPORT1)
+
+ mox.ReplayAll()
+
+ volume = DumbVolume()
+ volume['size'] = self.TEST_SIZE_IN_GB
+ result = drv.create_volume(volume)
+ self.assertEqual(self.TEST_NFS_EXPORT1, result['provider_location'])
+
+ mox.VerifyAll()
+
+ def test_delete_volume(self):
+ """delete_volume simple test case"""
+ mox = self._mox
+ drv = self._driver
+
+ self.stub_out_not_replaying(drv, '_ensure_share_mounted')
+
+ volume = DumbVolume()
+ volume['name'] = 'volume-123'
+ volume['provider_location'] = self.TEST_NFS_EXPORT1
+
+ mox.StubOutWithMock(drv, 'local_path')
+ drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH)
+
+ mox.StubOutWithMock(drv, '_path_exists')
+ drv._path_exists(self.TEST_LOCAL_PATH).AndReturn(True)
+
+ mox.StubOutWithMock(drv, '_execute')
+ drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True)
+
+ mox.ReplayAll()
+
+ drv.delete_volume(volume)
+
+ mox.VerifyAll()
+
+ def test_delete_should_ensure_share_mounted(self):
+ """delete_volume should ensure that corresponding share is mounted"""
+ mox = self._mox
+ drv = self._driver
+
+ self.stub_out_not_replaying(drv, '_execute')
+
+ volume = DumbVolume()
+ volume['name'] = 'volume-123'
+ volume['provider_location'] = self.TEST_NFS_EXPORT1
+
+ mox.StubOutWithMock(drv, '_ensure_share_mounted')
+ drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
+
+ mox.ReplayAll()
+
+ drv.delete_volume(volume)
+
+ mox.VerifyAll()
+
+ def test_delete_should_not_delete_if_provider_location_not_provided(self):
+ """delete_volume shouldn't try to delete if provider_location missed"""
+ mox = self._mox
+ drv = self._driver
+
+ self.stub_out_not_replaying(drv, '_ensure_share_mounted')
+
+ volume = DumbVolume()
+ volume['name'] = 'volume-123'
+ volume['provider_location'] = None
+
+ mox.StubOutWithMock(drv, '_execute')
+
+ mox.ReplayAll()
+
+ drv.delete_volume(volume)
+
+ mox.VerifyAll()
+
+ def test_delete_should_not_delete_if_there_is_no_file(self):
+ """delete_volume should not try to delete if file missed"""
+ mox = self._mox
+ drv = self._driver
+
+ self.stub_out_not_replaying(drv, '_ensure_share_mounted')
+
+ volume = DumbVolume()
+ volume['name'] = 'volume-123'
+ volume['provider_location'] = self.TEST_NFS_EXPORT1
+
+ mox.StubOutWithMock(drv, 'local_path')
+ drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH)
+
+ mox.StubOutWithMock(drv, '_path_exists')
+ drv._path_exists(self.TEST_LOCAL_PATH).AndReturn(False)
+
+ mox.StubOutWithMock(drv, '_execute')
+
+ mox.ReplayAll()
+
+ drv.delete_volume(volume)
+
+ mox.VerifyAll()
diff --git a/nova/volume/netapp.py b/nova/volume/netapp.py
index 6dd5c0e31..ce62a33ac 100644
--- a/nova/volume/netapp.py
+++ b/nova/volume/netapp.py
@@ -994,3 +994,297 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
def check_for_export(self, context, volume_id):
raise NotImplementedError()
+
+
+class NetAppLun(object):
+ """Represents a LUN on NetApp storage."""
+
+ def __init__(self, handle, name, size, metadata_dict):
+ self.handle = handle
+ self.name = name
+ self.size = size
+ self.metadata = metadata_dict
+
+ def get_metadata_property(self, prop):
+ """Get the metadata property of a LUN."""
+ if prop in self.metadata:
+ return self.metadata[prop]
+ name = self.name
+ msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
+ LOG.debug(msg % locals())
+
+
+class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
+ """NetApp C-mode iSCSI volume driver."""
+
+ def __init__(self, *args, **kwargs):
+ super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
+ self.lun_table = {}
+
+ def _create_client(self, **kwargs):
+ """Instantiate a web services client.
+
+ This method creates a "suds" client to make web services calls to the
+ DFM server. Note that the WSDL file is quite large and may take
+ a few seconds to parse.
+ """
+ wsdl_url = kwargs['wsdl_url']
+ LOG.debug(_('Using WSDL: %s') % wsdl_url)
+ if kwargs['cache']:
+ self.client = client.Client(wsdl_url, username=kwargs['login'],
+ password=kwargs['password'])
+ else:
+ self.client = client.Client(wsdl_url, username=kwargs['login'],
+ password=kwargs['password'],
+ cache=None)
+
+ def _check_flags(self):
+ """Ensure that the flags we care about are set."""
+ required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
+ 'netapp_server_hostname', 'netapp_server_port']
+ for flag in required_flags:
+ if not getattr(FLAGS, flag, None):
+ msg = _('%s is not set') % flag
+ raise exception.InvalidInput(data=msg)
+
+ def do_setup(self, context):
+ """Setup the NetApp Volume driver.
+
+ Called one time by the manager after the driver is loaded.
+ Validate the flags we care about and setup the suds (web services)
+ client.
+ """
+ self._check_flags()
+ self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
+ login=FLAGS.netapp_login, password=FLAGS.netapp_password,
+ hostname=FLAGS.netapp_server_hostname,
+ port=FLAGS.netapp_server_port, cache=True)
+
+ def check_for_setup_error(self):
+ """Check that the driver is working and can communicate.
+
+ Discovers the LUNs on the NetApp server.
+ """
+ self.lun_table = {}
+ luns = self.client.service.ListLuns()
+ for lun in luns:
+ meta_dict = {}
+ if hasattr(lun, 'Metadata'):
+ meta_dict = self._create_dict_from_meta(lun.Metadata)
+ discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size,
+ meta_dict)
+ self._add_lun_to_table(discovered_lun)
+ LOG.debug(_("Success getting LUN list from server"))
+
+ def create_volume(self, volume):
+ """Driver entry point for creating a new volume."""
+ default_size = '104857600' # 100 MB
+ gigabytes = 1073741824L # 2^30
+ name = volume['name']
+ if int(volume['size']) == 0:
+ size = default_size
+ else:
+ size = str(int(volume['size']) * gigabytes)
+ extra_args = {}
+ extra_args['OsType'] = 'linux'
+ extra_args['QosType'] = self._get_qos_type(volume)
+ extra_args['Container'] = volume['project_id']
+ extra_args['Display'] = volume['display_name']
+ extra_args['Description'] = volume['display_description']
+ extra_args['SpaceReserved'] = True
+ server = self.client.service
+ metadata = self._create_metadata_list(extra_args)
+ lun = server.ProvisionLun(Name=name, Size=size,
+ Metadata=metadata)
+ LOG.debug(_("Created LUN with name %s") % name)
+ self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
+ lun.Size, self._create_dict_from_meta(lun.Metadata)))
+
+ def delete_volume(self, volume):
+ """Driver entry point for destroying existing volumes."""
+ name = volume['name']
+ handle = self._get_lun_handle(name)
+ self.client.service.DestroyLun(Handle=handle)
+ LOG.debug(_("Destroyed LUN %s") % handle)
+ self.lun_table.pop(name)
+
+ def ensure_export(self, context, volume):
+ """Driver entry point to get the export info for an existing volume."""
+ handle = self._get_lun_handle(volume['name'])
+ return {'provider_location': handle}
+
+ def create_export(self, context, volume):
+ """Driver entry point to get the export info for a new volume."""
+ handle = self._get_lun_handle(volume['name'])
+ return {'provider_location': handle}
+
+ def remove_export(self, context, volume):
+ """Driver exntry point to remove an export for a volume.
+
+ Since exporting is idempotent in this driver, we have nothing
+ to do for unexporting.
+ """
+ pass
+
+ def initialize_connection(self, volume, connector):
+ """Driver entry point to attach a volume to an instance.
+
+ Do the LUN masking on the storage system so the initiator can access
+ the LUN on the target. Also return the iSCSI properties so the
+ initiator can find the LUN. This implementation does not call
+        _get_iscsi_properties() to get the properties because we cannot store
+        the LUN number in the database. We only find out what the LUN number
+        will be during this method call, so we construct the properties
+        dictionary ourselves.
+ """
+ initiator_name = connector['initiator']
+ handle = volume['provider_location']
+ server = self.client.service
+ server.MapLun(Handle=handle, InitiatorType="iscsi",
+ InitiatorName=initiator_name)
+ msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s")
+ LOG.debug(msg % locals())
+
+ target_details_list = server.GetLunTargetDetails(Handle=handle,
+ InitiatorType="iscsi", InitiatorName=initiator_name)
+ msg = _("Succesfully fetched target details for LUN %(handle)s and "
+ "initiator %(initiator_name)s")
+ LOG.debug(msg % locals())
+
+ if not target_details_list:
+ msg = _('Failed to get LUN target details for the LUN %s')
+ raise exception.VolumeBackendAPIException(msg % handle)
+ target_details = target_details_list[0]
+        if not (target_details.Address and target_details.Port):
+ msg = _('Failed to get target portal for the LUN %s')
+ raise exception.VolumeBackendAPIException(msg % handle)
+ iqn = target_details.Iqn
+ if not iqn:
+ msg = _('Failed to get target IQN for the LUN %s')
+ raise exception.VolumeBackendAPIException(msg % handle)
+
+ properties = {}
+ properties['target_discovered'] = False
+ (address, port) = (target_details.Address, target_details.Port)
+ properties['target_portal'] = '%s:%s' % (address, port)
+ properties['target_iqn'] = iqn
+ properties['target_lun'] = target_details.LunNumber
+ properties['volume_id'] = volume['id']
+
+ auth = volume['provider_auth']
+ if auth:
+ (auth_method, auth_username, auth_secret) = auth.split()
+ properties['auth_method'] = auth_method
+ properties['auth_username'] = auth_username
+ properties['auth_password'] = auth_secret
+
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': properties,
+ }
+
+ def terminate_connection(self, volume, connector):
+ """Driver entry point to unattach a volume from an instance.
+
+        Unmask the LUN on the storage system so the given initiator can no
+ longer access it.
+ """
+ initiator_name = connector['initiator']
+ handle = volume['provider_location']
+ self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi",
+ InitiatorName=initiator_name)
+ msg = _("Unmapped LUN %(handle)s from the initiator "
+ "%(initiator_name)s")
+ LOG.debug(msg % locals())
+
+ def create_snapshot(self, snapshot):
+ """Driver entry point for creating a snapshot.
+
+ This driver implements snapshots by using efficient single-file
+ (LUN) cloning.
+ """
+ vol_name = snapshot['volume_name']
+ snapshot_name = snapshot['name']
+ lun = self.lun_table[vol_name]
+ extra_args = {'SpaceReserved': False}
+ self._clone_lun(lun.handle, snapshot_name, extra_args)
+
+ def delete_snapshot(self, snapshot):
+ """Driver entry point for deleting a snapshot."""
+ handle = self._get_lun_handle(snapshot['name'])
+ self.client.service.DestroyLun(Handle=handle)
+ LOG.debug(_("Destroyed LUN %s") % handle)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Driver entry point for creating a new volume from a snapshot.
+
+ Many would call this "cloning" and in fact we use cloning to implement
+ this feature.
+ """
+ snapshot_name = snapshot['name']
+ lun = self.lun_table[snapshot_name]
+ new_name = volume['name']
+ extra_args = {}
+ extra_args['OsType'] = 'linux'
+ extra_args['QosType'] = self._get_qos_type(volume)
+ extra_args['Container'] = volume['project_id']
+ extra_args['Display'] = volume['display_name']
+ extra_args['Description'] = volume['display_description']
+ extra_args['SpaceReserved'] = True
+ self._clone_lun(lun.handle, new_name, extra_args)
+
+ def check_for_export(self, context, volume_id):
+ raise NotImplementedError()
+
+ def _get_qos_type(self, volume):
+ """Get the storage service type for a volume."""
+ type_id = volume['volume_type_id']
+ if not type_id:
+ return None
+ volume_type = volume_types.get_volume_type(None, type_id)
+ if not volume_type:
+ return None
+ return volume_type['name']
+
+ def _add_lun_to_table(self, lun):
+ """Adds LUN to cache table."""
+ if not isinstance(lun, NetAppLun):
+ msg = _("Object is not a NetApp LUN.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ self.lun_table[lun.name] = lun
+
+ def _clone_lun(self, handle, new_name, extra_args):
+ """Clone LUN with the given handle to the new name."""
+ server = self.client.service
+ metadata = self._create_metadata_list(extra_args)
+ lun = server.CloneLun(Handle=handle, NewName=new_name,
+ Metadata=metadata)
+ LOG.debug(_("Cloned LUN with new name %s") % new_name)
+ self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
+ lun.Size, self._create_dict_from_meta(lun.Metadata)))
+
+ def _create_metadata_list(self, extra_args):
+ """Creates metadata from kwargs."""
+ metadata = []
+ for key in extra_args.keys():
+ meta = self.client.factory.create("Metadata")
+ meta.Key = key
+ meta.Value = extra_args[key]
+ metadata.append(meta)
+ return metadata
+
+ def _get_lun_handle(self, name):
+ """Get the details for a LUN from our cache table."""
+ if not name in self.lun_table:
+ LOG.warn(_("Could not find handle for LUN named %s") % name)
+ return None
+ return self.lun_table[name]
+
+ def _create_dict_from_meta(self, metadata):
+ """Creates dictionary from metadata array."""
+ meta_dict = {}
+ if not metadata:
+ return meta_dict
+ for meta in metadata:
+ meta_dict[meta.Key] = meta.Value
+ return meta_dict
diff --git a/nova/volume/netapp_nfs.py b/nova/volume/netapp_nfs.py
new file mode 100644
index 000000000..27d278aa3
--- /dev/null
+++ b/nova/volume/netapp_nfs.py
@@ -0,0 +1,267 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp NFS storage.
+"""
+
+import os
+import suds
+import time
+
+from nova import exception
+from nova import flags
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.volume.netapp import netapp_opts
+from nova.volume import nfs
+
+from suds.sax import text
+
+LOG = logging.getLogger(__name__)
+
+netapp_nfs_opts = [
+ cfg.IntOpt('synchronous_snapshot_create',
+ default=0,
+               help='Whether the snapshot creation call returns immediately')
+ ]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(netapp_opts)
+FLAGS.register_opts(netapp_nfs_opts)
+
+
+class NetAppNFSDriver(nfs.NfsDriver):
+ """Executes commands relating to Volumes."""
+ def __init__(self, *args, **kwargs):
+ # NOTE(vish): db is set by Manager
+ self._execute = None
+ self._context = None
+ super(NetAppNFSDriver, self).__init__(*args, **kwargs)
+
+ def set_execute(self, execute):
+ self._execute = execute
+
+ def do_setup(self, context):
+ self._context = context
+ self.check_for_setup_error()
+ self._client = NetAppNFSDriver._get_client()
+
+ def check_for_setup_error(self):
+ """Returns an error if prerequisites aren't met"""
+ NetAppNFSDriver._check_dfm_flags()
+ super(NetAppNFSDriver, self).check_for_setup_error()
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot."""
+ vol_size = volume.size
+ snap_size = snapshot.volume_size
+
+ if vol_size != snap_size:
+ msg = _('Cannot create volume of size %(vol_size)s from '
+ 'snapshot of size %(snap_size)s')
+ raise exception.NovaException(msg % locals())
+
+ self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
+ share = self._get_volume_location(snapshot.volume_id)
+
+ return {'provider_location': share}
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot."""
+ self._clone_volume(snapshot['volume_name'],
+ snapshot['name'],
+ snapshot['volume_id'])
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot."""
+ nfs_mount = self._get_provider_location(snapshot.volume_id)
+
+ if self._volume_not_present(nfs_mount, snapshot.name):
+ return True
+
+ self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
+ run_as_root=True)
+
+ @staticmethod
+ def _check_dfm_flags():
+ """Raises error if any required configuration flag for OnCommand proxy
+ is missing."""
+ required_flags = ['netapp_wsdl_url',
+ 'netapp_login',
+ 'netapp_password',
+ 'netapp_server_hostname',
+ 'netapp_server_port']
+ for flag in required_flags:
+ if not getattr(FLAGS, flag, None):
+ raise exception.NovaException(_('%s is not set') % flag)
+
+ @staticmethod
+ def _get_client():
+ """Creates SOAP _client for ONTAP-7 DataFabric Service."""
+ client = suds.client.Client(FLAGS.netapp_wsdl_url,
+ username=FLAGS.netapp_login,
+ password=FLAGS.netapp_password)
+ soap_url = 'http://%s:%s/apis/soap/v1' % (
+ FLAGS.netapp_server_hostname,
+ FLAGS.netapp_server_port)
+ client.set_options(location=soap_url)
+
+ return client
+
+ def _get_volume_location(self, volume_id):
+ """Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>"""
+ nfs_server_ip = self._get_host_ip(volume_id)
+ export_path = self._get_export_path(volume_id)
+ return (nfs_server_ip + ':' + export_path)
+
+ def _clone_volume(self, volume_name, clone_name, volume_id):
+ """Clones mounted volume with OnCommand proxy API"""
+ host_id = self._get_host_id(volume_id)
+ export_path = self._get_full_export_path(volume_id, host_id)
+
+ request = self._client.factory.create('Request')
+ request.Name = 'clone-start'
+
+ clone_start_args = ('<source-path>%s/%s</source-path>'
+ '<destination-path>%s/%s</destination-path>')
+
+ request.Args = text.Raw(clone_start_args % (export_path,
+ volume_name,
+ export_path,
+ clone_name))
+
+ resp = self._client.service.ApiProxy(Target=host_id,
+ Request=request)
+
+ if resp.Status == 'passed' and FLAGS.synchronous_snapshot_create:
+ clone_id = resp.Results['clone-id'][0]
+ clone_id_info = clone_id['clone-id-info'][0]
+ clone_operation_id = int(clone_id_info['clone-op-id'][0])
+
+ self._wait_for_clone_finished(clone_operation_id, host_id)
+ elif resp.Status == 'failed':
+ raise exception.NovaException(resp.Reason)
+
+ def _wait_for_clone_finished(self, clone_operation_id, host_id):
+ """
+ Polls ONTAP7 for clone status. Returns once clone is finished.
+ :param clone_operation_id: Identifier of ONTAP clone operation
+ """
+ clone_list_options = ('<clone-id>'
+ '<clone-id-info>'
+ '<clone-op-id>%d</clone-op-id>'
+ '<volume-uuid></volume-uuid>'
+                              '</clone-id-info>'
+                              '</clone-id>')
+
+ request = self._client.factory.create('Request')
+ request.Name = 'clone-list-status'
+ request.Args = text.Raw(clone_list_options % clone_operation_id)
+
+ resp = self._client.service.ApiProxy(Target=host_id, Request=request)
+
+ while resp.Status != 'passed':
+ time.sleep(1)
+ resp = self._client.service.ApiProxy(Target=host_id,
+ Request=request)
+
+ def _get_provider_location(self, volume_id):
+ """
+ Returns provider location for given volume
+ :param volume_id:
+ """
+ volume = self.db.volume_get(self._context, volume_id)
+ return volume.provider_location
+
+ def _get_host_ip(self, volume_id):
+ """Returns IP address for the given volume"""
+ return self._get_provider_location(volume_id).split(':')[0]
+
+ def _get_export_path(self, volume_id):
+ """Returns NFS export path for the given volume"""
+ return self._get_provider_location(volume_id).split(':')[1]
+
+ def _get_host_id(self, volume_id):
+ """Returns ID of the ONTAP-7 host"""
+ host_ip = self._get_host_ip(volume_id)
+ server = self._client.service
+
+ resp = server.HostListInfoIterStart(ObjectNameOrId=host_ip)
+ tag = resp.Tag
+
+ try:
+ res = server.HostListInfoIterNext(Tag=tag, Maximum=1)
+ if hasattr(res, 'Hosts') and res.Hosts.HostInfo:
+ return res.Hosts.HostInfo[0].HostId
+ finally:
+ server.HostListInfoIterEnd(Tag=tag)
+
+ def _get_full_export_path(self, volume_id, host_id):
+ """Returns full path to the NFS share, e.g. /vol/vol0/home"""
+ export_path = self._get_export_path(volume_id)
+ command_args = '<pathname>%s</pathname>'
+
+ request = self._client.factory.create('Request')
+ request.Name = 'nfs-exportfs-storage-path'
+ request.Args = text.Raw(command_args % export_path)
+
+ resp = self._client.service.ApiProxy(Target=host_id,
+ Request=request)
+
+ if resp.Status == 'passed':
+ return resp.Results['actual-pathname'][0]
+ elif resp.Status == 'failed':
+ raise exception.NovaException(resp.Reason)
+
+ def _volume_not_present(self, nfs_mount, volume_name):
+ """
+        Check if the volume is not present on the given NFS share
+ """
+ try:
+ self._try_execute('ls', self._get_volume_path(nfs_mount,
+ volume_name))
+ except exception.ProcessExecutionError:
+ # If the volume isn't present
+ return True
+ return False
+
+ def _try_execute(self, *command, **kwargs):
+ # NOTE(vish): Volume commands can partially fail due to timing, but
+ # running them a second time on failure will usually
+ # recover nicely.
+ tries = 0
+ while True:
+ try:
+ self._execute(*command, **kwargs)
+ return True
+ except exception.ProcessExecutionError:
+ tries = tries + 1
+ if tries >= FLAGS.num_shell_tries:
+ raise
+ LOG.exception(_("Recovering from a failed execute. "
+ "Try number %s"), tries)
+ time.sleep(tries ** 2)
+
+ def _get_volume_path(self, nfs_share, volume_name):
+ """Get volume path (local fs path) for given volume name on given nfs
+ share
+ @param nfs_share string, example 172.18.194.100:/var/nfs
+ @param volume_name string,
+ example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
+ """
+ return os.path.join(self._get_mount_point_for_share(nfs_share),
+ volume_name)
diff --git a/nova/volume/nfs.py b/nova/volume/nfs.py
new file mode 100644
index 000000000..f91b52018
--- /dev/null
+++ b/nova/volume/nfs.py
@@ -0,0 +1,293 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ctypes
+import errno
+import os
+
+from nova import exception
+from nova import flags
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.libvirt import volume_nfs
+from nova.volume import driver
+
+LOG = logging.getLogger(__name__)
+
+volume_opts = [
+ cfg.StrOpt('nfs_shares_config',
+ default=None,
+ help='File with the list of available nfs shares'),
+ cfg.StrOpt('nfs_disk_util',
+ default='df',
+ help='Use du or df for free space calculation'),
+ cfg.BoolOpt('nfs_sparsed_volumes',
+ default=True,
+ help=('Create volumes as sparse files which take no space. '
+ 'If set to False, volumes are created as regular files. '
+ 'In that case volume creation takes a lot of time.'))
+]
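+
+# An nfs_shares_config file is expected to list one share per line,
+# for example (illustrative values):
+#
+#     172.18.194.100:/var/nfs
+#     172.18.194.101:/exports/nova
+#
+# Lines starting with '#' are ignored by _load_shares_config.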
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(volume_opts)
+FLAGS.register_opts(volume_nfs.volume_opts)
+
+
+class NfsDriver(driver.VolumeDriver):
+ """NFS based volume driver. Creates file on NFS share for using it
+ as block device on hypervisor."""
+
+ def do_setup(self, context):
+ """Any initialization the volume driver does while starting"""
+ super(NfsDriver, self).do_setup(context)
+
+ config = FLAGS.nfs_shares_config
+ if not config:
+ LOG.warn(_("There's no NFS config file configured "))
+ if not config or not os.path.exists(config):
+ msg = _("NFS config file doesn't exist")
+ LOG.warn(msg)
+ raise exception.NfsException(msg)
+
+ try:
+ self._execute('mount.nfs', check_exit_code=False)
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ raise exception.NfsException('mount.nfs is not installed')
+ else:
+ raise
+
+ def check_for_setup_error(self):
+ """Just to override parent behavior"""
+ pass
+
+ def create_volume(self, volume):
+ """Creates a volume"""
+
+ self._ensure_shares_mounted()
+
+ volume['provider_location'] = self._find_share(volume['size'])
+
+ LOG.info(_('Volume will be created on share %s') % volume['provider_location'])
+
+ self._do_create_volume(volume)
+
+ return {'provider_location': volume['provider_location']}
+
+ def delete_volume(self, volume):
+ """Deletes a logical volume."""
+
+ if not volume['provider_location']:
+ LOG.warn(_('Volume %s does not have provider_location specified, '
+ 'skipping'), volume['name'])
+ return
+
+ self._ensure_share_mounted(volume['provider_location'])
+
+ mounted_path = self.local_path(volume)
+
+ if not self._path_exists(mounted_path):
+ volume_name = volume['name']
+
+ LOG.warn(_('Trying to delete non-existing volume %(volume_name)s '
+ 'at path %(mounted_path)s') % locals())
+ return
+
+ self._execute('rm', '-f', mounted_path, run_as_root=True)
+
+ def ensure_export(self, ctx, volume):
+ """Synchronously recreates an export for a logical volume."""
+ self._ensure_share_mounted(volume['provider_location'])
+
+ def create_export(self, ctx, volume):
+ """Exports the volume. Can optionally return a Dictionary of changes
+ to the volume object to be persisted."""
+ pass
+
+ def remove_export(self, ctx, volume):
+ """Removes an export for a logical volume."""
+ pass
+
+ def check_for_export(self, context, volume_id):
+ """Make sure volume is exported."""
+ pass
+
+ def initialize_connection(self, volume, connector):
+ """Allow connection to connector and return connection info."""
+ data = {'export': volume['provider_location'],
+ 'name': volume['name']}
+ return {
+ 'driver_volume_type': 'nfs',
+ 'data': data
+ }
+
+ def terminate_connection(self, volume, connector):
+ """Disallow connection from connector"""
+ pass
+
+ def local_path(self, volume):
+ """Get volume path (mounted locally fs path) for given volume
+ :param volume: volume reference
+ """
+ nfs_share = volume['provider_location']
+ return os.path.join(self._get_mount_point_for_share(nfs_share),
+ volume['name'])
+
+ def _create_sparsed_file(self, path, size):
+ """Creates file with 0 disk usage"""
+ self._execute('truncate', '-s', self._sizestr(size),
+ path, run_as_root=True)
+
+ def _create_regular_file(self, path, size):
+ """Creates regular file of given size. Takes a lot of time for large
+ files"""
+ KB = 1024
+ MB = KB * 1024
+ GB = MB * 1024
+
+ block_size_mb = 1
+ block_count = size * GB / (block_size_mb * MB)
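+ # e.g. a 2 GB volume gives 2 * 1024 ** 3 / 1024 ** 2 = 2048
+ # one-megabyte blocks for the dd call below.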
+
+ self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
+ 'bs=%dM' % block_size_mb,
+ 'count=%d' % block_count,
+ run_as_root=True)
+
+ def _set_rw_permissions_for_all(self, path):
+ """Sets 666 permissions for the path"""
+ self._execute('chmod', 'ugo+rw', path, run_as_root=True)
+
+ def _do_create_volume(self, volume):
+ """Create a volume on given nfs_share
+ :param volume: volume reference
+ """
+ volume_path = self.local_path(volume)
+ volume_size = volume['size']
+
+ if FLAGS.nfs_sparsed_volumes:
+ self._create_sparsed_file(volume_path, volume_size)
+ else:
+ self._create_regular_file(volume_path, volume_size)
+
+ self._set_rw_permissions_for_all(volume_path)
+
+ def _ensure_shares_mounted(self):
+ """Look for NFS shares in the flags and tries to mount them locally"""
+ self._mounted_shares = []
+
+ for share in self._load_shares_config():
+ try:
+ self._ensure_share_mounted(share)
+ self._mounted_shares.append(share)
+ except Exception as exc:
+ LOG.warning(_('Exception during mounting %s') % (exc,))
+
+ LOG.debug('Available shares %s' % str(self._mounted_shares))
+
+ def _load_shares_config(self):
+ return [line.strip() for line in open(FLAGS.nfs_shares_config)
+ if line.strip() and not line.startswith('#')]
+
+ def _ensure_share_mounted(self, nfs_share):
+ """Mount NFS share
+ :param nfs_share: NFS share to mount, e.g. 172.18.194.100:/var/nfs
+ """
+ mount_path = self._get_mount_point_for_share(nfs_share)
+ self._mount_nfs(nfs_share, mount_path, ensure=True)
+
+ def _find_share(self, volume_size_for):
+ """Choose NFS share among available ones for given volume size. Current
+ implementation looks for greatest capacity
+ :param volume_size_for: int size in Gb
+ """
+
+ if not self._mounted_shares:
+ raise exception.NfsNoSharesMounted()
+
+ greatest_size = 0
+ greatest_share = None
+
+ for nfs_share in self._mounted_shares:
+ capacity = self._get_available_capacity(nfs_share)
+ if capacity > greatest_size:
+ greatest_share = nfs_share
+ greatest_size = capacity
+
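+ # volume_size_for is in GB while share capacity is measured in
+ # bytes (df -B 1), hence the conversion below.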
+ if volume_size_for * 1024 * 1024 * 1024 > greatest_size:
+ raise exception.NfsNoSuitableShareFound(
+ volume_size=volume_size_for)
+ return greatest_share
+
+ def _get_mount_point_for_share(self, nfs_share):
+ """
+ :param nfs_share: example 172.18.194.100:/var/nfs
+ """
+ return os.path.join(FLAGS.nfs_mount_point_base,
+ self._get_hash_str(nfs_share))
+
+ def _get_available_capacity(self, nfs_share):
+ """Calculate available space on the NFS share
+ :param nfs_share: example 172.18.194.100:/var/nfs
+ """
+ mount_point = self._get_mount_point_for_share(nfs_share)
+
+ out, _ = self._execute('df', '-P', '-B', '1', mount_point,
+ run_as_root=True)
+ out = out.splitlines()[1]
+
+ available = 0
+
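+ # 'df' trusts the filesystem's free-space figure; the 'du' branch
+ # instead computes free space as total size minus apparent usage,
+ # excluding snapshot directories.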
+ if FLAGS.nfs_disk_util == 'df':
+ available = int(out.split()[3])
+ else:
+ size = int(out.split()[1])
+ out, _ = self._execute('du', '-sb', '--apparent-size',
+ '--exclude', '*snapshot*', mount_point,
+ run_as_root=True)
+ used = int(out.split()[0])
+ available = size - used
+
+ return available
+
+ def _mount_nfs(self, nfs_share, mount_path, ensure=False):
+ """Mount NFS share to mount path"""
+ if not self._path_exists(mount_path):
+ self._execute('mkdir', '-p', mount_path)
+
+ try:
+ self._execute('mount', '-t', 'nfs', nfs_share, mount_path,
+ run_as_root=True)
+ except exception.ProcessExecutionError as exc:
+ if ensure and 'already mounted' in exc.stderr:
+ LOG.warn(_("%s is already mounted"), nfs_share)
+ else:
+ raise
+
+ def _path_exists(self, path):
+ """Check given path """
+ try:
+ self._execute('stat', path, run_as_root=True)
+ return True
+ except exception.ProcessExecutionError as exc:
+ if 'No such file or directory' in exc.stderr:
+ return False
+ else:
+ raise
+
+ def _get_hash_str(self, base_str):
+ """returns string that represents hash of base_str (in a hex format)"""
+ return str(ctypes.c_uint64(hash(base_str)).value)