 glustolibs-gluster/glustolibs/gluster/gluster_base_class.py |  84 ++++--
 glustolibs-gluster/glustolibs/gluster/volume_libs.py        |  49 ++---
 tests/bvt/test_bvt_lite_and_plus.py                         | 213 ++++++++++
 tests/gluster_tests_config.yml                              | 269 +++++++++++++
 4 files changed, 576 insertions(+), 39 deletions(-)
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index e63be6988..8ab513d00 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -24,7 +24,7 @@ import unittest
from glusto.core import Glusto as g
import os
-
+import random
class runs_on(g.CarteTestClass):
"""Decorator providing runs_on capability for standard unittest script"""
@@ -85,7 +85,7 @@ class GlusterBaseClass(unittest.TestCase):
},
'dispersed': {
'type': 'dispersed',
- 'disperse_count': 4,
+ 'disperse_count': 6,
'redundancy_count': 2,
'transport': 'tcp'
},
@@ -97,13 +97,13 @@ class GlusterBaseClass(unittest.TestCase):
'distributed-replicated': {
'type': 'distributed-replicated',
'dist_count': 2,
- 'replica_count': 2,
+ 'replica_count': 3,
'transport': 'tcp'
},
'distributed-dispersed': {
'type': 'distributed-dispersed',
'dist_count': 2,
- 'disperse_count': 4,
+ 'disperse_count': 6,
'redundancy_count': 2,
'transport': 'tcp'
}
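
Note on the disperse_count bump above: Gluster's disperse_count is the total number of bricks in the disperse set, of which redundancy_count hold redundancy, so the usable data bricks are disperse_count - redundancy_count. The old default (4, 2) left only 4 - 2 = 2 data bricks (50% overhead); the new default gives 6 - 2 = 4 data bricks, i.e. the common 4+2 dispersed layout with 33% overhead.
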
@@ -112,12 +112,13 @@ class GlusterBaseClass(unittest.TestCase):
# Get the volume configuration.
cls.volume = {}
found_volume = False
- if 'volumes' in g.config['gluster']:
- for volume in g.config['gluster']['volumes']:
- if volume['voltype']['type'] == cls.volume_type:
- cls.volume = volume
- found_volume = True
- break
+ if 'gluster' in g.config:
+ if 'volumes' in g.config['gluster']:
+ for volume in g.config['gluster']['volumes']:
+ if volume['voltype']['type'] == cls.volume_type:
+ cls.volume = volume
+ found_volume = True
+ break
if found_volume:
if not 'name' in cls.volume:
@@ -144,28 +145,59 @@ class GlusterBaseClass(unittest.TestCase):
cls.volume_type)
return False
+ # SMB Info
+ if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
+ cls.volume['smb'] = {}
+ cls.volume['smb']['enable'] = True
+ users_info_found = False
+ try:
+ if cls.volume['smb']['users_info']:
+ users_info_found = True
+ except KeyError:
+ users_info_found = False
+
+ if not users_info_found:
+ cls.volume['smb']['users_info'] = {}
+ try:
+ cls.volume['smb']['users_info'] = (
+ g.config['gluster']['cluster_config']['smb']
+ ['users_info'])
+ except KeyError:
+ pass
+
+ if not cls.volume['smb']['users_info']:
+ cls.volume['smb']['users_info']['root'] = {}
+ cls.volume['smb']['users_info']['root']['password'] = (
+ 'foobar')
+
# Define Volume variables.
cls.volname = cls.volume['name']
cls.servers = cls.volume['servers']
cls.voltype = cls.volume['voltype']['type']
cls.mnode = cls.servers[0]
+ try:
+ cls.smb_users_info = cls.volume['smb']['users_info']
+ except KeyError:
+ cls.smb_users_info = {}
# Get the mount configuration.
cls.mounts_dict_list = []
cls.mounts = []
found_mount = False
- if 'mounts' in g.config['gluster']:
- for mount in g.config['gluster']['mounts']:
- if mount['protocol'] == cls.mount_type:
- if not 'volname' in mount:
- mount['volname'] = cls.volname
- if not 'server' in mount:
- mount['server'] = mnode
- if not 'mountpoint' in mount:
- mount['mountpoint'] = (os.path.join(
- "/mnt", '_'.join([cls.volname, cls.mount_type])))
- cls.mounts_dict_list.append(mount)
- found_mount = True
+ if 'gluster' in g.config:
+ if 'mounts' in g.config['gluster']:
+ for mount in g.config['gluster']['mounts']:
+ if mount['protocol'] == cls.mount_type:
+ if ('volname' not in mount or (not mount['volname'])):
+ mount['volname'] = cls.volname
+ if ('server' not in mount or (not mount['server'])):
+                            mount['server'] = cls.mnode
+ if ('mountpoint' not in mount or
+ (not mount['mountpoint'])):
+ mount['mountpoint'] = (os.path.join(
+ "/mnt", '_'.join([cls.volname, cls.mount_type])))
+ cls.mounts_dict_list.append(mount)
+ found_mount = True
if not found_mount:
for client in g.config['clients']:
mount = {
@@ -180,6 +212,14 @@ class GlusterBaseClass(unittest.TestCase):
'options': ''
}
cls.mounts_dict_list.append(mount)
+
+ if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
+ for mount in cls.mounts_dict_list:
+ if 'smbuser' not in mount:
+ mount['smbuser'] = random.choice(cls.smb_users_info.keys())
+ mount['smbpasswd'] = (
+ cls.smb_users_info[mount['smbuser']]['password'])
+
from glustolibs.gluster.mount_ops import create_mount_objs
cls.mounts = create_mount_objs(cls.mounts_dict_list)
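
In sum, the new setUpClass code resolves SMB credentials in three steps: the volume's own smb['users_info'], then the cluster-wide g.config['gluster']['cluster_config']['smb']['users_info'], then a hard-coded root/foobar fallback; each cifs/smb mount is then assigned a random user from the resolved set. A minimal standalone sketch of that precedence (plain dicts stand in for g.config; the helper name is illustrative):

    import random

    def resolve_smb_users_info(volume, config):
        """Mirror the fallback order used by GlusterBaseClass above."""
        smb = volume.setdefault('smb', {'enable': True})
        users = smb.get('users_info')
        if not users:
            # Fall back to the cluster-wide SMB users, if configured.
            users = (config.get('gluster', {})
                           .get('cluster_config', {})
                           .get('smb', {})
                           .get('users_info', {}))
        if not users:
            # Last resort: the hard-coded root user.
            users = {'root': {'password': 'foobar'}}
        smb['users_info'] = users
        return users

    users_info = resolve_smb_users_info({'name': 'testvol'}, {})
    smbuser = random.choice(list(users_info.keys()))
    smbpasswd = users_info[smbuser]['password']
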
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_libs.py b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
index 37af7fa57..24aac821d 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
@@ -22,7 +22,18 @@
from glusto.core import Glusto as g
import time
-from glustolibs.gluster.volume_ops import get_volume_info
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.volume_ops import (volume_create, volume_start,
+ set_volume_options, get_volume_info,
+ volume_stop, volume_delete)
+from glustolibs.gluster.tiering_ops import (add_extra_servers_to_cluster,
+ tier_attach,
+ is_tier_process_running)
+from glustolibs.gluster.quota_ops import (enable_quota, set_quota_limit_usage,
+ is_quota_enabled)
+from glustolibs.gluster.uss_ops import enable_uss, is_uss_enabled
+from glustolibs.gluster.samba_ops import share_volume_over_smb
+from glustolibs.gluster.snap_ops import snap_delete_by_volumename
def setup_volume(mnode, all_servers_info, volume_config, force=False):
@@ -180,6 +191,7 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
else:
g.log.error("Invalid volume type defined in config")
return False
+
# Get transport type
if 'transport' in volume_config['voltype']:
        transport_type = volume_config['voltype']['transport']
@@ -187,7 +199,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
transport_type = 'tcp'
# get bricks_list
- from glustolibs.gluster.lib_utils import form_bricks_list
bricks_list = form_bricks_list(mnode=mnode, volname=volname,
number_of_bricks=number_of_bricks,
servers=servers,
@@ -196,8 +207,8 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
g.log.error("Number_of_bricks is greater than the unused bricks on "
"servers")
return False
+
# Create volume
- from glustolibs.gluster.volume_ops import volume_create
ret, out, err = volume_create(mnode=mnode, volname=volname,
bricks_list=bricks_list, force=force,
**kwargs)
@@ -207,7 +218,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
# Start Volume
time.sleep(2)
- from glustolibs.gluster.volume_ops import volume_start
ret = volume_start(mnode, volname)
if not ret:
g.log.error("volume start %s failed" % volname)
@@ -217,7 +227,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
if ('tier' in volume_config and 'create_tier' in volume_config['tier'] and
volume_config['tier']['create_tier']):
# get servers info for tier attach
- from glustolibs.gluster.tiering_ops import add_extra_servers_to_cluster
if ('extra_servers' in volume_config and
volume_config['extra_servers']):
extra_servers = volume_config['extra_servers']
@@ -256,7 +265,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
number_of_bricks = dist * rep
# Attach Tier
- from glustolibs.gluster.tiering_ops import tier_attach
ret, out, err = tier_attach(mnode=mnode, volname=volname,
extra_servers=extra_servers,
extra_servers_info=all_servers_info,
@@ -268,7 +276,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
time.sleep(30)
# Check if tier is running
- from glustolibs.gluster.tiering_ops import is_tier_process_running
rc = True
for server in extra_servers:
ret = is_tier_process_running(server, volname)
@@ -281,7 +288,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
# Enable Quota
if ('quota' in volume_config and 'enable' in volume_config['quota'] and
volume_config['quota']['enable']):
- from glustolibs.gluster.quota_ops import enable_quota
ret, _, _ = enable_quota(mnode=mnode, volname=volname)
if ret != 0:
g.log.error("Unable to set quota on the volume %s", volname)
@@ -303,7 +309,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
size = "100GB"
# Set quota_limit_usage
- from glustolibs.gluster.quota_ops import set_quota_limit_usage
ret, _, _ = set_quota_limit_usage(mnode=mnode, volname=volname,
path=path, limit=size)
if ret != 0:
@@ -311,7 +316,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
return False
# Check if quota is enabled
- from glustolibs.gluster.quota_ops import is_quota_enabled
ret = is_quota_enabled(mnode=mnode, volname=volname)
if not ret:
g.log.error("Quota not enabled on the volume %s", volname)
@@ -320,13 +324,11 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
# Enable USS
if ('uss' in volume_config and 'enable' in volume_config['uss'] and
volume_config['uss']['enable']):
- from glustolibs.gluster.uss_ops import enable_uss
ret, out, err = enable_uss(mnode=mnode, volname=volname)
if ret != 0:
g.log.error("Unable to enable uss on the volume %s", volname)
return False
- from glustolibs.gluster.uss_ops import is_uss_enabled
ret = is_uss_enabled(mnode=mnode, volname=volname)
if not ret:
g.log.error("USS is not enabled on the volume %s", volname)
@@ -342,10 +344,27 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
## g.log.error("failed to set the ganesha option for %s" % volname)
## return False
+ # Enable Samba
+ if ('smb' in volume_config and 'enable' in volume_config['smb'] and
+ volume_config['smb']['enable']):
+ smb_users_info = {}
+ if ('users_info' in volume_config['smb'] and
+ volume_config['smb']['users_info']):
+ smb_users_info = volume_config['smb']['users_info']
+ else:
+            g.log.error("SMB users info not available in the volume config. "
+                        "Unable to export volume %s as SMB share" % volname)
+ return False
+ ret = share_volume_over_smb(mnode=mnode, volname=volname,
+ servers=servers,
+ smb_users_info=smb_users_info)
+ if not ret:
+ g.log.error("Failed to export volume %s as SMB Share" % volname)
+ return False
+
# Set all the volume options:
if 'options' in volume_config:
volume_options = volume_config['options']
- from glustolibs.gluster.volume_ops import set_volume_options
ret = set_volume_options(mnode=mnode, volname=volname,
options=volume_options)
if not ret:
@@ -370,8 +389,6 @@ def cleanup_volume(mnode, volname):
Example:
cleanup_volume("abc.xyz.com", "testvol")
"""
- from glustolibs.gluster.snap_ops import snap_delete_by_volumename
-
volinfo = get_volume_info(mnode, volname)
if volinfo is None or volname not in volinfo:
g.log.info("Volume %s does not exist in %s" % (volname, mnode))
@@ -383,13 +400,11 @@ def cleanup_volume(mnode, volname):
"volume %s" % volname)
return False
- from glustolibs.gluster.volume_ops import volume_stop
ret, _, _ = volume_stop(mnode, volname, force=True)
if ret != 0:
g.log.error("Failed to stop volume %s" % volname)
return False
- from glustolibs.gluster.volume_ops import volume_delete
ret = volume_delete(mnode, volname)
if not ret:
g.log.error("Unable to cleanup the volume %s" % volname)
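
With the imports hoisted to the top of volume_libs.py, setup_volume now drives the full lifecycle (create, start, optional tier/quota/USS/SMB, volume options) from one volume_config dict. A minimal sketch of an SMB-enabled call, with illustrative hostnames and credentials (all_servers_info follows the servers_info layout from the config file below):

    from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume

    all_servers_info = {
        'server-vm1': {'host': 'server-vm1',
                       'devices': ['/dev/vdb', '/dev/vdc'],
                       'brick_root': '/bricks'},
        # ... one entry per server in volume_config['servers'] ...
    }

    volume_config = {
        'name': 'testvol',
        'servers': ['server-vm1', 'server-vm2', 'server-vm3'],
        'voltype': {'type': 'replicated',
                    'replica_count': 3,
                    'transport': 'tcp'},
        'smb': {'enable': True,
                'users_info': {'user1': {'password': 'xyz'}}},
    }

    if setup_volume(mnode='server-vm1', all_servers_info=all_servers_info,
                    volume_config=volume_config, force=True):
        # ... exercise the SMB share, then tear down ...
        cleanup_volume(mnode='server-vm1', volname=volume_config['name'])
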
diff --git a/tests/bvt/test_bvt_lite_and_plus.py b/tests/bvt/test_bvt_lite_and_plus.py
new file mode 100644
index 000000000..e47c13477
--- /dev/null
+++ b/tests/bvt/test_bvt_lite_and_plus.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import pytest
+import os
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.gluster_init import start_glusterd
+from glustolibs.gluster.peer_ops import (peer_probe_servers, is_peer_connected,
+ peer_status)
+from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
+from glustolibs.gluster.volume_ops import volume_info, volume_status
+import time
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['glusterfs', 'nfs', 'cifs']])
+class BvtTestsClass(GlusterBaseClass):
+    """Class containing cases for BVT Lite and BVT Plus.
+
+    BVT Lite: Run the case on a distributed-replicated volume with the
+    glusterfs, nfs and cifs protocols.
+
+    BVT Plus: Run the case on every combination of volume type and
+    protocol type.
+ """
+ @classmethod
+ def setUpClass(cls):
+        """Following are the steps in setUpClass:
+ - Start glusterd on all servers
+ - Peer Probe
+ - Setup the volume
+ - Mount the volume
+ """
+ GlusterBaseClass.setUpClass.im_func(cls)
+ g.log.info("Starting %s:" % cls.__name__)
+
+ # Start Glusterd
+ ret = start_glusterd(servers=cls.servers)
+ assert (ret == True), "glusterd did not start on at least one server"
+
+ # PeerProbe servers
+ ret = peer_probe_servers(mnode=cls.servers[0], servers=cls.servers[1:])
+ assert (ret == True), "Unable to peer probe one or more servers"
+
+ # Validate if peer is connected from all the servers
+ for server in cls.servers:
+ ret = is_peer_connected(server, cls.servers)
+ assert (ret == True), "Validating Peers to be in Cluster Failed"
+
+ # Print Peer Status from mnode
+ _, _, _ = peer_status(cls.mnode)
+
+ # Setup Volume
+ ret = setup_volume(mnode=cls.mnode,
+ all_servers_info=cls.all_servers_info,
+ volume_config=cls.volume, force=True)
+ assert (ret == True), "Setup volume %s failed" % cls.volname
+ time.sleep(10)
+
+ # Print Volume Info and Status
+ _, _, _ = volume_info(cls.mnode, cls.volname)
+
+ _, _, _ = volume_status(cls.mnode, cls.volname)
+
+ # Validate if volume is exported or not
+ if 'nfs' in cls.mount_type:
+ cmd = "showmount -e localhost"
+ _, _, _ = g.run(cls.mnode, cmd)
+
+ cmd = "showmount -e localhost | grep %s" % cls.volname
+ ret, _, _ = g.run(cls.mnode, cmd)
+ assert (ret == 0), "Volume %s not exported" % cls.volname
+
+ if 'cifs' in cls.mount_type:
+ cmd = "smbclient -L localhost"
+ _, _, _ = g.run(cls.mnode, cmd)
+
+            cmd = ("smbclient -L localhost -U%% | grep -i -Fw gluster-%s " %
+ cls.volname)
+ ret, _, _ = g.run(cls.mnode, cmd)
+            assert (ret == 0), ("Volume %s not accessible via SMB/CIFS share" %
+ cls.volname)
+
+ # Create Mounts
+ rc = True
+ for mount_obj in cls.mounts:
+ ret = mount_obj.mount()
+ if not ret:
+ g.log.error("Unable to mount volume '%s:%s' on '%s:%s'" %
+ (mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint))
+ rc = False
+        assert (rc == True), ("Mounting volume %s failed on one or more "
+                              "clients" % cls.volname)
+
+ # Upload io scripts
+ cls.script_local_path = ("/usr/share/glustolibs/io/"
+ "scripts/file_dir_ops.py")
+ cls.script_upload_path = "/tmp/file_dir_ops.py"
+ ret = os.path.exists(cls.script_local_path)
+ assert (ret == True), ("Unable to find the io scripts")
+
+ for client in cls.clients:
+ g.upload(client, cls.script_local_path, cls.script_upload_path)
+ g.run(client, "ls -l %s" % cls.script_upload_path)
+ g.run(client, "chmod +x %s" % cls.script_upload_path)
+ g.run(client, "ls -l %s" % cls.script_upload_path)
+
+ def setUp(self):
+ pass
+
+ def test_bvt(self):
+ """Test IO from the mounts.
+ """
+ g.log.info("Starting Test: %s on %s %s" %
+ (self.id(), self.volume_type, self.mount_type))
+
+ # Get stat of mount before the IO
+ for mount_obj in self.mounts:
+ cmd = "mount | grep %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ cmd = "df -h %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ cmd = "ls -ld %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ cmd = "stat %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+
+ # Start IO on all mounts.
+ all_mounts_procs = []
+ count = 1
+ for mount_obj in self.mounts:
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 10 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path,
+ count, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count = count + 10
+
+ # Get IO status
+ rc = True
+ for i, proc in enumerate(all_mounts_procs):
+ ret, _, _ = proc.async_communicate()
+ if ret != 0:
+ g.log.error("IO Failed on %s:%s" %
+ (self.mounts[i].client_system,
+ self.mounts[i].mountpoint))
+ rc = False
+ assert (rc == True), "IO failed on some of the clients"
+
+ # Get stat of all the files/dirs created.
+ all_mounts_procs = []
+ for mount_obj in self.mounts:
+ cmd = ("python %s stat "
+ "-R %s" % (self.script_upload_path, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ rc = True
+ for i, proc in enumerate(all_mounts_procs):
+ ret, _, _ = proc.async_communicate()
+ if ret != 0:
+ g.log.error("Stat of files and dirs under %s:%s Failed" %
+ (self.mounts[i].client_system,
+ self.mounts[i].mountpoint))
+ rc = False
+ assert (rc == True), "Stat failed on some of the clients"
+
+ def tearDown(self):
+ pass
+
+ @classmethod
+ def tearDownClass(cls):
+        """Unmount all mounts and cleanup the volume.
+ """
+ GlusterBaseClass.tearDownClass.im_func(cls)
+
+ # Unmount mounts
+ rc = True
+ for mount_obj in cls.mounts:
+ ret = mount_obj.unmount()
+ if not ret:
+ g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'" %
+ (mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint))
+ rc = False
+        assert (rc == True), ("Unmounting volume %s failed on one or more "
+                              "clients" % cls.volname)
+
+ # Cleanup Volume
+ ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
+ assert (ret == True), ("cleanup volume %s failed" % cls.volname)
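
The class runs once per volume-type/protocol pair: @runs_on is built on Glusto's CarteTestClass, so the two lists above expand into a Cartesian product of test variants. An illustrative computation of the resulting BVT Plus matrix (the expansion itself is Glusto's; this only shows the expected combinations):

    from itertools import product

    volume_types = ['replicated', 'distributed', 'distributed-replicated',
                    'dispersed', 'distributed-dispersed']
    mount_types = ['glusterfs', 'nfs', 'cifs']

    combinations = list(product(volume_types, mount_types))
    assert len(combinations) == 15  # 5 volume types x 3 protocols
    for volume_type, mount_type in combinations:
        print('BvtTestsClass: volume_type=%s, mount_type=%s'
              % (volume_type, mount_type))
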
diff --git a/tests/gluster_tests_config.yml b/tests/gluster_tests_config.yml
new file mode 100644
index 000000000..e0fcc8209
--- /dev/null
+++ b/tests/gluster_tests_config.yml
@@ -0,0 +1,269 @@
+log_file: /var/log/tests/gluster_tests.log
+log_level: DEBUG
+
+servers:
+ - server-vm1
+ - server-vm2
+ - server-vm3
+ - server-vm4
+ - server-vm5
+ - server-vm6
+ - server-vm7
+ - server-vm8
+ - server-vm9
+ - server-vm10
+ - server-vm11
+ - server-vm12
+
+clients:
+ - client-vm1
+ - client-vm2
+ - client-vm3
+ - client-vm4
+
+servers_info:
+ server-vm1: &server1
+ host: server-vm1
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm2: &server2
+ host: server-vm2
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm3: &server3
+ host: server-vm3
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm4: &server4
+ host: server-vm4
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm5: &server5
+ host: server-vm5
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm6: &server6
+ host: server-vm6
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm7: &server7
+ host: server-vm7
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm8: &server8
+ host: server-vm8
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm9: &server9
+ host: server-vm9
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm10: &server10
+ host: server-vm10
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm11: &server11
+ host: server-vm11
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm12: &server12
+ host: server-vm12
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+
+clients_info:
+ client-vm1: &client1
+ host: client-vm1
+ client-vm2: &client2
+ host: client-vm2
+ super_user: 'root'
+ client-vm3: &client3
+ host: client-vm3
+ super_user: 'Admin'
+ platform: 'windows'
+ client-vm4: &client4
+ host: client-vm4
+ super_user: 'Administrator'
+ platform: 'windows'
+
+gluster:
+ running_on_volumes: []
+ running_on_mounts: []
+
+ cluster_config:
+ smb:
+ enable: False
+ users_info:
+ 'root':
+ password: 'foobar'
+ acl: ''
+ 'user1':
+ password: 'xyz'
+ acl: ''
+ 'user2':
+ password: 'abc'
+ acl: ''
+ ctdb_setup: True
+ ctdb_servers: []
+ ctdb_vips:
+ - vip: vip1
+ routing_prefix: '23'
+ interface: 'eth0'
+ - vip: vip2
+ routing_prefix: '22'
+ interface: 'eth0'
+ ctdb_metavol_brick_path: ''
+
+ nfs_ganesha:
+ enable: False
+ num_of_nfs_ganesha_nodes: 4
+ vips: []
+
+ volume_types:
+ distributed: &distributed
+ type: distributed
+ dist_count: 4
+ transport: tcp
+ replicated: &replicated
+ type: replicated
+ replica_count: 3
+ arbiter_count: 1
+ transport: tcp
+ distributed-replicated: &distributed-replicated
+ type: distributed-replicated
+ dist_count: 2
+ replica_count: 3
+ transport: tcp
+ dispersed: &dispersed
+ type: dispersed
+ disperse_count: 6
+ redundancy_count: 2
+ transport: tcp
+ distributed-dispersed: &distributed-dispersed
+ type: distributed-dispersed
+ dist_count: 2
+ disperse_count: 6
+ redundancy_count: 2
+ transport: tcp
+
+ slave_volumes:
+ - &slave_vol1
+ voltype: *distributed-replicated
+ servers: [ server-vm5, server-vm6, server-vm7, server-vm8 ]
+ extra_servers: []
+ quota:
+ enable: False
+ limit_usage:
+ path: "/"
+ size: 100GB
+ percent:
+ limit_objects:
+ path: "/"
+ number:
+ percent:
+ alert_time:
+ soft_timeout:
+ hard_timeout:
+ inode_quota:
+ enable: False
+ bitrot:
+ enable: False
+ scrub_throttle:
+ scrub_frequency:
+
+ volumes:
+ - &vol1
+ name: testvol
+ voltype: *distributed-dispersed
+ servers: [ server-vm1, server-vm2, server-vm3, server-vm4 ]
+ extra_servers: [ server-vm9, server-vm10, server-vm11, server-vm12 ]
+ tier:
+ create_tier: False
+ tier_type: *distributed-replicated
+ quota:
+ enable: False
+ limit_usage:
+ path: "/"
+ size: 100GB
+ percent:
+ limit_objects:
+ path: "/"
+ number:
+ percent:
+ alert_time: 0
+ soft_timeout: 0
+ hard_timeout: 0
+ inode_quota:
+ enable: False
+ bitrot:
+ enable: False
+ scrub_throttle: 'aggressive'
+ scrub_frequency: 'hourly'
+ geo_rep:
+ create_geo_rep_session: False
+ slave_volumes: [ *slave_vol1 ]
+ user: 'root'
+ group: 'geogroup'
+ sync_mode: 'rsync'
+ options:
+ performance.readdir-ahead: "on"
+ snapshot:
+ use_snapshot: True
+ snap_jobname: 'snap_job'
+ snap_schedule: 2
+ uss:
+ enable: False
+ nfs_ganesha:
+ enable: False
+ smb:
+ enable: False
+
+ mounts:
+ - &mount1
+ protocol: 'glusterfs'
+ server: server-vm1
+ volname: testvol
+      client: *client1
+ mountpoint: ''
+ options: ''
+ - &mount2
+ protocol: 'nfs'
+ server: server-vm1
+ volname: testvol
+ client:
+ host: client-vm2
+ mountpoint: ''
+ options: ''
+ num_of_mounts: 1
+ - &mount3
+ protocol: 'smb'
+ server: server-vm2
+ volname: testvol
+      client: *client3
+ mountpoint: ''
+ options: ''
+ smbuser: 'user1'
+ smbpasswd: 'xyz'
+ num_of_mounts: 3
+ - &mount4
+ protocol: 'smb'
+ server: server-vm4
+ volname: testvol
+ client:
+ host: client-vm4
+ super_user: 'Administrator'
+ platform: 'windows'
+ mountpoint: ''
+ options: ''
+ smbuser: 'user2'
+ smbpasswd: 'abc'
+ - &mount5
+ protocol: 'cifs'
+ server: server-vm1
+ volname: testvol
+ client:
+ host: client-vm2
+ mountpoint: ''
+ options: ''
+ smbuser: 'user2'
+ smbpasswd: 'abc'
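
Because the mounts alias anchors defined earlier in the file (for example *client1 from clients_info), a PyYAML round-trip is a cheap sanity check that the aliases resolve and that every SMB/CIFS mount carries credentials. A minimal sketch, assuming the file path below:

    import yaml

    with open('tests/gluster_tests_config.yml') as f:
        config = yaml.safe_load(f)

    volnames = set(vol['name'] for vol in config['gluster']['volumes'])
    for mount in config['gluster']['mounts']:
        # Every mount must target a defined volume ...
        assert mount['volname'] in volnames, mount
        # ... and SMB/CIFS mounts must carry credentials.
        if mount['protocol'] in ('smb', 'cifs'):
            assert mount.get('smbuser') and mount.get('smbpasswd'), mount

    print('%d mounts validated against %d volume(s)'
          % (len(config['gluster']['mounts']), len(volnames)))
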