Sandro Bonazzola has uploaded a new change for review.

Change subject: WIP: add glusterfs support to hosted engine setup
......................................................................

WIP: add glusterfs support to hosted engine setup

Should allow using existing GlusterFS storage.
Will fail with GlusterFS provisioning since the implementation is
not yet finished.
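
For clarity, a minimal sketch (not part of this patch) of the environment values
the "existing GlusterFS storage" path is expected to be driven by, using only the
keys and constants introduced below; the plain dict and the concrete values are
illustrative:

    # Illustrative only: reuse an existing GlusterFS volume and skip brick
    # provisioning, so the share is validated through the NFS-style code path.
    from ovirt_hosted_engine_setup import constants as ohostedcons

    environment = {}
    environment[ohostedcons.StorageEnv.DOMAIN_TYPE] = \
        ohostedcons.DomainTypes.GLUSTERFS
    environment[ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED] = False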

Change-Id: I33bb4ef586bbad7fa3e2de29d63e4e6ff9d86bab
Signed-off-by: Sandro Bonazzola <sbona...@redhat.com>
---
M ovirt-hosted-engine-setup.spec.in
M src/ovirt_hosted_engine_setup/constants.py
M src/plugins/ovirt-hosted-engine-setup/storage/Makefile.am
M src/plugins/ovirt-hosted-engine-setup/storage/__init__.py
A src/plugins/ovirt-hosted-engine-setup/storage/glusterfs.py
M src/plugins/ovirt-hosted-engine-setup/storage/nfs.py
M src/plugins/ovirt-hosted-engine-setup/storage/storage.py
7 files changed, 327 insertions(+), 47 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-hosted-engine-setup refs/changes/08/36108/1

diff --git a/ovirt-hosted-engine-setup.spec.in b/ovirt-hosted-engine-setup.spec.in
index 82a801f..79fb377 100644
--- a/ovirt-hosted-engine-setup.spec.in
+++ b/ovirt-hosted-engine-setup.spec.in
@@ -36,28 +36,21 @@
 BuildRoot:      %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
 BuildArch:      noarch
 
+
+Requires:       bind-utils
+Requires:       iptables
+Requires:       lsof
+Requires:       openssh-server
+Requires:       openssl
 Requires:       python
 Requires:       python-ethtool >= 0.6-3
-Requires:       otopi >= 1.3.0
-Requires:       vdsm >= 4.16.0
-Requires:       vdsm-cli >= 4.16.0
-Requires:       vdsm-python >= 4.16.0
-Requires:       ovirt-host-deploy >= 1.3.0
-Requires:       openssh-server
 Requires:       python-paramiko
-Requires:       virt-viewer
-Requires:       openssl
-Requires:       sudo
-Requires:       bind-utils
-Requires:       ovirt-hosted-engine-ha >= 1.3.0
 Requires:       sanlock >= 2.8
 Requires:       sanlock-python >= 2.8
-Requires:       lsof
-Requires:       iptables
+Requires:       sudo
+Requires:       virt-viewer
 BuildRequires:  gettext
 BuildRequires:  python2-devel
-
-Requires:       %{engine}-sdk-python >= 3.6.0.0
 
 %if 0%{?fedora}
 Requires:       qemu-img
@@ -67,6 +60,20 @@
 Requires:       qemu-img-rhev >= 0.12.1.2-2.415
 %endif
 
+
+Requires:       otopi >= 1.3.0
+Requires:       ovirt-host-deploy >= 1.3.0
+Requires:       ovirt-hosted-engine-ha >= 1.3.0
+Requires:       %{engine}-sdk-python >= 3.6.0.0
+Requires:       vdsm >= 4.17.0
+Requires:       vdsm-cli >= 4.17.0
+Requires:       vdsm-gluster >= 4.17.0
+Requires:       vdsm-python >= 4.17.0
+
+
+Requires:       glusterfs-server >= 3.6.1
+
+
 %description
 Hosted engine tool for oVirt project.
 
diff --git a/src/ovirt_hosted_engine_setup/constants.py b/src/ovirt_hosted_engine_setup/constants.py
index 39dd203..0724233 100644
--- a/src/ovirt_hosted_engine_setup/constants.py
+++ b/src/ovirt_hosted_engine_setup/constants.py
@@ -65,6 +65,7 @@
 @util.codegen
 class FileSystemTypes(object):
     NFS = 'nfs'
+    GLUSTERFS = 'glusterfs'
 
 
 @util.export
@@ -272,6 +273,8 @@
 class Const(object):
     MINIMUM_SPACE_STORAGEDOMAIN_MB = 20480
     FIRST_HOST_ID = 1
+    GLUSTERD_SERVICE = 'glusterd'
+    GLUSTERFSD_SERVICE = 'glusterfsd'
     HA_AGENT_SERVICE = 'ovirt-ha-agent'
     HA_BROCKER_SERVICE = 'ovirt-ha-broker'
     HOSTED_ENGINE_VM_NAME = 'HostedEngine'
@@ -502,6 +505,22 @@
     )
     def DOMAIN_TYPE(self):
         return 'OVEHOSTED_STORAGE/domainType'
+
+    @ohostedattrs(
+        answerfile=True,
+        summary=True,
+        description=_('GlusterFS Brick Provisioning'),
+    )
+    def GLUSTER_PROVISIONING_ENABLED(self):
+        return 'OVEHOSTED_STORAGE/glusterProvisioningEnabled'
+
+    @ohostedattrs(
+        answerfile=True,
+        summary=True,
+        description=_('GlusterFS Share Name'),
+    )
+    def GLUSTER_SHARE_NAME(self):
+        return 'OVEHOSTED_STORAGE/glusterProvisionedShareName'
 
     @ohostedattrs(
         answerfile=True,
@@ -763,6 +782,7 @@
     CONFIG_STORAGE_LATE = 'ohosted.storage.configuration.late'
     CONFIG_STORAGE_ISCSI = 'ohosted.storage.iscsi.configuration.available'
     CONFIG_STORAGE_NFS = 'ohosted.storage.nfs.configuration.available'
+    GLUSTER_PROVISIONING = 'ohosted.storage.gluster.provisioned'
     CONFIG_ADDITIONAL_HOST = 'ohosted.core.additional.host'
     REQUIRE_ANSWER_FILE = 'ohosted.core.require.answerfile'
     CONFIG_OVF_IMPORT = 'ohosted.configuration.ovf'
@@ -811,6 +831,7 @@
 @util.export
 @util.codegen
 class Defaults(object):
+    DEFAULT_GLUSTER_SHARE_NAME = 'hosted_engine_glusterfs'
     DEFAULT_STORAGE_DOMAIN_NAME = 'hosted_storage'
     DEFAULT_STORAGE_DATACENTER_NAME = 'hosted_datacenter'
     DEFAULT_VDSMD_SERVICE = 'vdsmd'
diff --git a/src/plugins/ovirt-hosted-engine-setup/storage/Makefile.am b/src/plugins/ovirt-hosted-engine-setup/storage/Makefile.am
index 07a084a..a9c87aa 100644
--- a/src/plugins/ovirt-hosted-engine-setup/storage/Makefile.am
+++ b/src/plugins/ovirt-hosted-engine-setup/storage/Makefile.am
@@ -25,6 +25,7 @@
 
 mydir=$(ovirthostedengineplugindir)/ovirt-hosted-engine-setup/storage
 dist_my_PYTHON = \
+       glusterfs.py \
        __init__.py \
        iscsi.py \
        nfs.py \
diff --git a/src/plugins/ovirt-hosted-engine-setup/storage/__init__.py b/src/plugins/ovirt-hosted-engine-setup/storage/__init__.py
index 9f486bf..d14c89b 100644
--- a/src/plugins/ovirt-hosted-engine-setup/storage/__init__.py
+++ b/src/plugins/ovirt-hosted-engine-setup/storage/__init__.py
@@ -24,6 +24,7 @@
 from otopi import util
 
 
+from . import glusterfs
 from . import iscsi
 from . import nfs
 from . import storage
@@ -31,6 +32,7 @@
 
 @util.export
 def createPlugins(context):
+    glusterfs.Plugin(context=context)
     iscsi.Plugin(context=context)
     nfs.Plugin(context=context)
     storage.Plugin(context=context)
diff --git a/src/plugins/ovirt-hosted-engine-setup/storage/glusterfs.py b/src/plugins/ovirt-hosted-engine-setup/storage/glusterfs.py
new file mode 100644
index 0000000..22337e3
--- /dev/null
+++ b/src/plugins/ovirt-hosted-engine-setup/storage/glusterfs.py
@@ -0,0 +1,243 @@
+#
+# ovirt-hosted-engine-setup -- ovirt hosted engine setup
+# Copyright (C) 2014 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+
+"""
+GlusterFS storage provisioning plugin.
+"""
+
+import gettext
+
+
+from otopi import util
+from otopi import plugin
+from vdsm import vdscli
+
+
+from ovirt_hosted_engine_setup import constants as ohostedcons
+from ovirt_hosted_engine_setup import domains as ohosteddomains
+
+
+_ = lambda m: gettext.dgettext(message=m, domain='ovirt-hosted-engine-setup')
+
+
+@util.export
+class Plugin(plugin.PluginBase):
+    """
+    GlusterFS storage provisioning plugin.
+    """
+
+    def __init__(self, context):
+        super(Plugin, self).__init__(context=context)
+        self._checker = ohosteddomains.DomainChecker()
+
+    def _provision_gluster_volume(self):
+        """
+        Set volume options as suggested by the Gluster Storage Domain Reference
+        @see: http://www.ovirt.org/Gluster_Storage_Domain_Reference
+        """
+        cli = vdscli.connect()
+        self.logger.debug('glusterVolumesList')
+        response = cli.glusterVolumesList()
+        self.logger.debug(response)
+        if response['status']['code'] != 0:
+            # TODO: check if a more informative message can be given
+            raise RuntimeError(response['status']['message'])
+
+        # TODO: ask user for bricks
+        # FIXME: VDSM now requires at least replica 2 and stripe 2
+        brick_list = [
+            '192.168.1.107:/gluster',
+            '192.168.1.107:/gluster2',
+            '192.168.1.107:/gluster3',
+            '192.168.1.107:/gluster4'
+        ]
+        replica_count = '2'
+        stripe_count = '2'
+        transport_list = ['tcp']
+        force = True
+        response = cli.glusterVolumeCreate(
+            self.environment[ohostedcons.StorageEnv.GLUSTER_SHARE_NAME],
+            brick_list,
+            replica_count,
+            stripe_count,
+            transport_list,
+            force
+        )
+        self.logger.debug(response)
+        if response['status']['code'] != 0:
+            # TODO: check if a more informative message can be given
+            raise RuntimeError(response['status']['message'])
+        # gluster_uuid =  response['uuid']
+        # TODO: check if we need this UUID for something
+        for option, value in {
+            'cluster.quorum-type': 'auto',
+            'network.ping-timeout': '10',
+            'nfs.disable': 'on',
+            'user.cifs': 'disable',
+            'auth.allow': '*',
+            'group': 'virt',
+            'storage.owner-uid': '36',
+            'storage.owner-gid': '36',
+        }.items():
+            response = cli.glusterVolumeSet(
+                self.environment[ohostedcons.StorageEnv.GLUSTER_SHARE_NAME],
+                option,
+                value
+            )
+            if response['status']['code'] != 0:
+                # TODO: check if a more informative message can be given
+                raise RuntimeError(response['status']['message'])
+
+        response = cli.glusterVolumesList()
+        self.logger.debug(response)
+        if response['status']['code'] != 0:
+            # TODO: check if a more informative message can be given
+            raise RuntimeError(response['status']['message'])
+
+        response = cli.glusterTasksList()
+        self.logger.debug(response)
+        if response['status']['code'] != 0:
+            # TODO: check if a more informative message can be given
+            raise RuntimeError(response['status']['message'])
+        # TODO: check if we need to do something about these tasks
+
+        response = cli.glusterVolumeStart(
+            self.environment[ohostedcons.StorageEnv.GLUSTER_SHARE_NAME]
+        )
+        if response['status']['code'] != 0:
+            # TODO: check if a more informative message can be given
+            raise RuntimeError(response['status']['message'])
+
+        response = cli.glusterVolumesList()
+        self.logger.debug(response)
+        if response['status']['code'] != 0:
+            # TODO: check if a more informative message can be given
+            raise RuntimeError(response['status']['message'])
+
+    @plugin.event(
+        stage=plugin.Stages.STAGE_INIT,
+    )
+    def _init(self):
+        self.environment.setdefault(
+            ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED,
+            None
+        )
+        self.environment.setdefault(
+            ohostedcons.StorageEnv.GLUSTER_SHARE_NAME,
+            ohostedcons.Defaults.DEFAULT_GLUSTER_SHARE_NAME
+        )
+
+    @plugin.event(
+        stage=plugin.Stages.STAGE_CUSTOMIZATION,
+        name=ohostedcons.Stages.GLUSTER_PROVISIONING,
+        after=(
+            ohostedcons.Stages.CONFIG_STORAGE_EARLY,
+        ),
+        before=(
+            ohostedcons.Stages.CONFIG_STORAGE_NFS,
+        ),
+    )
+    def _customization(self):
+        """
+        If GLUSTER_PROVISIONING_ENABLED is False, a remote GlusterFS
+        server will be used and validated as if it were an NFS server.
+        Control then moves to the nfs.py plugin.
+        """
+        if self.environment[
+            ohostedcons.StorageEnv.DOMAIN_TYPE
+        ] != ohostedcons.DomainTypes.GLUSTERFS:
+            self.environment[
+                ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
+            ] = False
+        interactive = self.environment[
+            ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
+        ] is None
+        if interactive:
+            self.environment[
+                ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
+            ] = self.dialog.queryString(
+                name='OVEHOSTED_GLUSTER_PROVISIONING',
+                note=_(
+                    'Do you want to configure this host for '
+                    'providing GlusterFS storage?: '
+                ),
+                prompt=True,
+                validValues=(_('Yes'), _('No')),
+                caseSensitive=False,
+                default=_('Yes')
+            ) == _('Yes').lower()
+
+    @plugin.event(
+        stage=plugin.Stages.STAGE_MISC,
+        condition=lambda self: self.environment[
+            ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
+        ],
+        after=(
+            ohostedcons.Stages.VDSMD_START,
+        ),
+        before=(
+            ohostedcons.Stages.STORAGE_AVAILABLE,
+        )
+    )
+    def _misc(self):
+        """
+        If GlusterFS provisioning was requested:
+        - configure GlusterFS to allow hyper-convergence
+        - start the GlusterFS services
+        - provision the GlusterFS volume
+        """
+        # TODO: configure glusterfs service
+        self.logger.info(_('Starting GlusterFS services'))
+        for service in (
+            ohostedcons.Const.GLUSTERD_SERVICE,
+            ohostedcons.Const.GLUSTERFSD_SERVICE,
+        ):
+            self.services.state(
+                name=service,
+                state=True,
+            )
+        self._provision_gluster_volume()
+
+    @plugin.event(
+        stage=plugin.Stages.STAGE_CLOSEUP,
+        condition=lambda self: self.environment[
+            ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
+        ],
+    )
+    def _closeup(self):
+        """
+        Enable GlusterFS services if GlusterFS provisioning was requested
+        """
+        self.logger.info(_('Enabling GlusterFS services'))
+        for service in (
+            ohostedcons.Const.GLUSTERD_SERVICE,
+            ohostedcons.Const.GLUSTERFSD_SERVICE,
+        ):
+            self.services.startup(
+                name=service,
+                state=True,
+            )
+            self.services.state(
+                name=service,
+                state=True,
+            )
+
+
+# vim: expandtab tabstop=4 shiftwidth=4
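
For reference, a condensed sketch of the provisioning sequence implemented in
_provision_gluster_volume above, using the same vdscli verbs; the brick list and
share name are placeholders and the volume option set is abridged:

    from vdsm import vdscli

    def _check(response):
        # each gluster verb answers with {'status': {'code': ..., 'message': ...}}
        if response['status']['code'] != 0:
            raise RuntimeError(response['status']['message'])

    cli = vdscli.connect()
    share = 'hosted_engine_glusterfs'            # DEFAULT_GLUSTER_SHARE_NAME
    bricks = ['host1:/brick1', 'host1:/brick2',  # placeholder bricks
              'host1:/brick3', 'host1:/brick4']

    _check(cli.glusterVolumeCreate(share, bricks, '2', '2', ['tcp'], True))
    for option, value in {                       # note: iterate with .items()
        'group': 'virt',
        'storage.owner-uid': '36',
        'storage.owner-gid': '36',
    }.items():
        _check(cli.glusterVolumeSet(share, option, value))
    _check(cli.glusterVolumeStart(share))
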
diff --git a/src/plugins/ovirt-hosted-engine-setup/storage/nfs.py b/src/plugins/ovirt-hosted-engine-setup/storage/nfs.py
index 3e03b05..c689ba1 100644
--- a/src/plugins/ovirt-hosted-engine-setup/storage/nfs.py
+++ b/src/plugins/ovirt-hosted-engine-setup/storage/nfs.py
@@ -19,7 +19,7 @@
 
 
 """
-Local storage domain plugin.
+NFS / GlusterFS storage plugin.
 """
 
 import os
@@ -61,6 +61,8 @@
         elif domain_type == ohostedcons.DomainTypes.NFS4:
             fstype = ohostedcons.FileSystemTypes.NFS
             opts.append('vers=4')
+        elif domain_type == ohostedcons.DomainTypes.GLUSTERFS:
+            fstype = ohostedcons.FileSystemTypes.GLUSTERFS
 
         if fstype == ohostedcons.FileSystemTypes.NFS:
             opts.append('retry=1')
@@ -187,14 +189,15 @@
         before=(
             ohostedcons.Stages.CONFIG_STORAGE_LATE,
         ),
-        condition=(
-            lambda self: self.environment[
-                ohostedcons.StorageEnv.DOMAIN_TYPE
-            ] in (
-                # ohostedcons.DomainTypes.GLUSTERFS,
+        condition=lambda self: (
+            self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE] in (
+                ohostedcons.DomainTypes.GLUSTERFS,
                 ohostedcons.DomainTypes.NFS3,
                 ohostedcons.DomainTypes.NFS4,
-            )
+            ) and
+            not self.environment[
+                ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
+            ]
         ),
     )
     def _customization(self):
@@ -278,17 +281,18 @@
         before=(
             ohostedcons.Stages.DIALOG_TITLES_E_STORAGE,
         ),
-        condition=(
-            lambda self: (
-                not self.environment[
-                    ohostedcons.CoreEnv.IS_ADDITIONAL_HOST
-                ] and
-                self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE] in (
-                    # ohostedcons.DomainTypes.GLUSTERFS,
-                    ohostedcons.DomainTypes.NFS3,
-                    ohostedcons.DomainTypes.NFS4,
-                )
-            )
+        condition=lambda self: (
+            not self.environment[
+                ohostedcons.CoreEnv.IS_ADDITIONAL_HOST
+            ] and
+            self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE] in (
+                ohostedcons.DomainTypes.GLUSTERFS,
+                ohostedcons.DomainTypes.NFS3,
+                ohostedcons.DomainTypes.NFS4,
+            ) and
+            not self.environment[
+                ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
+            ]
         ),
     )
     def _late_customization(self):
diff --git a/src/plugins/ovirt-hosted-engine-setup/storage/storage.py b/src/plugins/ovirt-hosted-engine-setup/storage/storage.py
index a083e0a..3a7b58a 100644
--- a/src/plugins/ovirt-hosted-engine-setup/storage/storage.py
+++ b/src/plugins/ovirt-hosted-engine-setup/storage/storage.py
@@ -425,18 +425,21 @@
             ohostedcons.VDSMConstants.NFS_DOMAIN,
             ohostedcons.VDSMConstants.GLUSTERFS_DOMAIN,
         ):
-            conList = [
-                {
-                    'connection': self.environment[
-                        ohostedcons.StorageEnv.STORAGE_DOMAIN_CONNECTION
-                    ],
-                    'user': 'kvm',
-                    'id': self.environment[
-                        ohostedcons.StorageEnv.CONNECTION_UUID
-                    ],
-                    'protocol_version': self.protocol_version,
-                }
-            ]
+            conDict = {
+                'connection': self.environment[
+                    ohostedcons.StorageEnv.STORAGE_DOMAIN_CONNECTION
+                ],
+                'user': 'kvm',
+                'id': self.environment[
+                    ohostedcons.StorageEnv.CONNECTION_UUID
+                ],
+            }
+            if self.storageType == ohostedcons.VDSMConstants.NFS_DOMAIN:
+                conDict['protocol_version'] = self.protocol_version
+            if self.storageType == ohostedcons.VDSMConstants.GLUSTERFS_DOMAIN:
+                conDict['tpgt'] = '1'
+                conDict['vfs_type'] = 'glusterfs'
+            conList = [conDict]
         elif self.storageType in (
             ohostedcons.VDSMConstants.ISCSI_DOMAIN,
         ):
@@ -845,8 +848,7 @@
                 prompt=True,
                 caseSensitive=True,
                 validValues=(
-                    # Enable when glusterfs issues are solved:
-                    # ohostedcons.DomainTypes.GLUSTERFS,
+                    ohostedcons.DomainTypes.GLUSTERFS,
                     ohostedcons.DomainTypes.ISCSI,
                     ohostedcons.DomainTypes.NFS3,
                     ohostedcons.DomainTypes.NFS4,
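
As a usage note (illustrative, not part of the patch), the connection descriptors
built in the storage.py hunk above end up shaped roughly as follows; server paths
and UUIDs are placeholders, and the resulting list is what is later handed to
VDSM's storage-server connection call:

    # Illustrative shapes of conList for the two domain families touched above.
    nfs_con = {
        'connection': 'nfs.example.com:/exports/hosted_engine',   # placeholder
        'user': 'kvm',
        'id': 'CONNECTION_UUID',                                   # placeholder
        'protocol_version': 3,                                     # NFS only
    }
    glusterfs_con = {
        'connection': 'gfs.example.com:/hosted_engine_glusterfs',  # placeholder
        'user': 'kvm',
        'id': 'CONNECTION_UUID',                                    # placeholder
        'tpgt': '1',              # GlusterFS only, per this patch
        'vfs_type': 'glusterfs',  # GlusterFS only
    }
    conList = [glusterfs_con]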


-- 
To view, visit http://gerrit.ovirt.org/36108
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I33bb4ef586bbad7fa3e2de29d63e4e6ff9d86bab
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-hosted-engine-setup
Gerrit-Branch: master
Gerrit-Owner: Sandro Bonazzola <sbona...@redhat.com>