Martin Sivák has uploaded a new change for review.

Change subject: Encapsulate the storage metadata and add support for blockSD
......................................................................

Encapsulate the storage metadata and add support for blockSD

This creates an encapsulation class that is responsible for
the creation and usage of the storage metadata directory.

It allows the caller to specify an arbitrary number of services
and then initializes the proper files and mappings for the
broker to use them.

Example usage:

b = FilesystemBackend("uuid-of-the-storage-domain", "nfs")
b.create({
  "hosted-engine.lockspace": 1048576,
  "hosted-engine.metadata": 1048576
})
b.connect()

Supported storage types are: nfs, block and glusterfs

The type serves only as a hint; the storage domain has to exist
and already be connected for the above to work. The class
autodetects the SD type in all cases except glusterSD.
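
Once connected, each service can be resolved to the file that
backs it (continuing the example above):

fname, offset = b.filename("hosted-engine.lockspace")

For the filesystem backend the returned offset is always 0.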

Change-Id: Iaced4ac08936fc9314ff821343ce33d29a5897cf
Signed-off-by: Martin Sivak <msi...@redhat.com>
---
M ovirt_hosted_engine_ha/env/constants.py.in
M ovirt_hosted_engine_ha/lib/Makefile.am
A ovirt_hosted_engine_ha/lib/storage_backends.py
A ovirt_hosted_engine_ha/lib/storage_backends_test.py
4 files changed, 159 insertions(+), 1 deletion(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-hosted-engine-ha refs/changes/97/25797/1

diff --git a/ovirt_hosted_engine_ha/env/constants.py.in b/ovirt_hosted_engine_ha/env/constants.py.in
index 73a2ba9..3fefd40 100644
--- a/ovirt_hosted_engine_ha/env/constants.py.in
+++ b/ovirt_hosted_engine_ha/env/constants.py.in
@@ -22,7 +22,6 @@
 METADATA_FEATURE_VERSION = 1
 METADATA_PARSE_VERSION = 1
 
-MD_EXTENSION = 'metadata'
 MAX_HOST_ID_SCAN = 64
 HOST_SEGMENT_BYTES = 4096
 METADATA_BLOCK_BYTES = 512
diff --git a/ovirt_hosted_engine_ha/lib/Makefile.am b/ovirt_hosted_engine_ha/lib/Makefile.am
index 22133a8..03660dc 100644
--- a/ovirt_hosted_engine_ha/lib/Makefile.am
+++ b/ovirt_hosted_engine_ha/lib/Makefile.am
@@ -33,6 +33,8 @@
        metadata.py \
        util.py \
        vds_client.py \
+       storage_backends.py \
+       storage_backends_test.py \
        $(NULL)
 
 SUBDIRS = \
diff --git a/ovirt_hosted_engine_ha/lib/storage_backends.py b/ovirt_hosted_engine_ha/lib/storage_backends.py
new file mode 100644
index 0000000..dda8a96
--- /dev/null
+++ b/ovirt_hosted_engine_ha/lib/storage_backends.py
@@ -0,0 +1,157 @@
+from collections import namedtuple
+import os
+import struct
+import subprocess
+import zlib
+from ..env import constants
+from . import util
+import math
+from cStringIO import StringIO
+from operator import itemgetter
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class StorageBackend(object):
+    """
+    The base template for Storage backend classes.
+    """
+
+    def __init__(self):
+        # the atomic block size of the underlying storage
+        self._blocksize = 512
+
+    def connect(self):
+        """Initialize the storage."""
+        raise NotImplementedError()
+
+    def disconnect(self):
+        """Close the storage."""
+        raise NotImplementedError()
+
+    def filename(self, service):
+        """
+        Return a tuple with the filename to open and bytes to skip
+        to get to the metadata structures.
+        """
+        raise NotImplementedError()
+
+    @property
+    def blocksize(self):
+        return self._blocksize
+
+    def create(self, service_map):
+        """
+        Reinitialize the storage backend according to service_map.
+        Each key is a service name; each value is the size of the
+        required block in bytes.
+        """
+        raise NotImplementedError()
+
+
+class FilesystemBackend(StorageBackend):
+    """
+    Backend for all filesystem based storage. This includes VDSM's
+    LVM based block domains, whose LVs are reached through symlinks in
+    the same directory structure that VDSM uses for NFS based domains.
+    """
+    def __init__(self, sd_uuid, dom_type):
+        super(FilesystemBackend, self).__init__()
+        self._sd_uuid = sd_uuid
+        self._dom_type = dom_type
+        self._lv_based = False
+        self._storage_path = None
+
+    def filename(self, service):
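+        # each service lives in its own file inside the metadata
+        # directory, so the data always starts at offset 0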
+        fname = os.path.join(self._storage_path, service)
+        return (fname, 0)
+
+    def get_domain_path(self, sd_uuid, dom_type):
+        """
+        Return the path of the storage domain that holds the engine VM.
+        """
+        parent = constants.SD_MOUNT_PARENT
+        if dom_type == 'glusterfs':
+            parent = os.path.join(parent, 'glusterSD')
+
+        for dname in os.listdir(parent):
+            path = os.path.join(parent, dname, sd_uuid)
+            if os.access(path, os.F_OK):
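+                # a hit under .../blockSD/ means the domain is LVM
+                # based and connect() must expose its LVs via symlinks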
+                if dname == "blockSD":
+                    self._lv_based = True
+                return path
+        raise Exception("path to storage domain {0} not found in {1}"
+                        .format(sd_uuid, parent))
+
+    def connect(self):
+        self._lv_based = False
+        self._storage_path = os.path.join(self.get_domain_path(self._sd_uuid,
+                                                               self._dom_type),
+                                          constants.SD_METADATA_DIR)
+        util.mkdir_recursive(self._storage_path)
+
+        if not self._lv_based:
+            return
+
+        # create LV symlinks
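+        # each metadata service of a block domain lives in an LV named
+        # "<SD_METADATA_DIR>-<service>"; linking the LVs into the
+        # metadata directory lets filename() treat both domain types alike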
+        uuid = self._sd_uuid
+        for lv in os.listdir(os.path.join("/dev", uuid)):
+            # skip all LVs that do not have proper name
+            if not lv.startswith(constants.SD_METADATA_DIR + "-"):
+                continue
+
+            # strip the prefix and use the rest as symlink name
+            service = lv.split(constants.SD_METADATA_DIR + "-", 1)[-1]
+            service_link = os.path.join(self._storage_path, service)
+            # drop any stale link first; none exists on the very first
+            # connect after the domain was created
+            if os.path.islink(service_link):
+                os.unlink(service_link)
+            os.symlink(os.path.join("/dev", uuid, lv), service_link)
+
+    def disconnect(self):
+        pass
+
+    def lvcreate(self, vg_uuid, lv_name, size_bytes, popen=subprocess.Popen):
+        """
+        Call lvm lvcreate and ask it to create a Logical Volume in the
+        Storage Domain's Volume Group. It should be named lv_name
+        and be big enough to fit size_bytes into it.
+        """
+        lvc = popen(stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    args=["lvm", "lvcreate", "-L", str(size_bytes) + "B",
+                          "-n", lv_name, vg_uuid])
+        # communicate() drains the pipes while waiting, avoiding the
+        # deadlock a plain wait() could cause once a pipe buffer fills
+        out, err = lvc.communicate()
+        if lvc.returncode != 0:
+            logger.error("lvcreate failed for %s/%s: %s",
+                         vg_uuid, lv_name, err)
+
+    def create(self, service_map):
+        for service, size in service_map.iteritems():
+            service_path = os.path.join(self._storage_path, service)
+            if self._lv_based:
+                lvname = "-".join([constants.SD_METADATA_DIR, service])
+                self.lvcreate(self._sd_uuid, lvname, size)
+            else:
+                # file based storage
+                with open(service_path, "w") as f:
+                    # create an empty file, possibly utilizing
+                    # sparse files if size was provided
+                    if size:
+                        f.seek(size - 1)
+                        f.write("\0")
+
+        # reconnect so all links are refreshed
+        self.disconnect()
+        self.connect()
diff --git a/ovirt_hosted_engine_ha/lib/storage_backends_test.py b/ovirt_hosted_engine_ha/lib/storage_backends_test.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ovirt_hosted_engine_ha/lib/storage_backends_test.py


-- 
To view, visit http://gerrit.ovirt.org/25797
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Iaced4ac08936fc9314ff821343ce33d29a5897cf
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-hosted-engine-ha
Gerrit-Branch: master
Gerrit-Owner: Martin Sivák <msi...@redhat.com>