log_file: /var/log/tests/gluster_tests.log
log_level: DEBUG

# 'servers' is the list of Hostnames/IPs of the servers in the cluster.
# This section has to be defined.
servers:
    - server-vm1
    - server-vm2
    - server-vm3
    - server-vm4
    - server-vm5
    - server-vm6
    - server-vm7
    - server-vm8
    - server-vm9
    - server-vm10
    - server-vm11
    - server-vm12

# 'clients' is the list of Hostnames/IPs of the clients in the cluster.
# This section has to be defined.
clients:
    - client-vm1
    - client-vm2
    - client-vm3
    - client-vm4

# 'servers_info' is info about each server in the cluster.
# Each server_info is a dict with the 'Hostname/IP' of the server as key.
# The info should contain the host (Hostname/IP) of the server, the devices
# to use for creating bricks, and brick_root, i.e. the dirname of the brick
# mount point.
# Note: use the same Hostname/IP used in the 'servers' section above.
# This section has to be defined.
servers_info:
    server-vm1: &server1
        host: server-vm1
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm2: &server2
        host: server-vm2
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm3: &server3
        host: server-vm3
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm4: &server4
        host: server-vm4
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm5: &server5
        host: server-vm5
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm6: &server6
        host: server-vm6
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm7: &server7
        host: server-vm7
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm8: &server8
        host: server-vm8
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm9: &server9
        host: server-vm9
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm10: &server10
        host: server-vm10
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm11: &server11
        host: server-vm11
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    server-vm12: &server12
        host: server-vm12
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"

# 'clients_info' is info about each client in the cluster.
# Each client_info is a dict with the 'Hostname/IP' of the client as key.
# The info should contain the host (Hostname/IP) of the client, the platform
# (Linux or Windows), and the super_user name (Admin/Administrator/* in case
# of Windows, root/* in case of Linux).
# This section has to be defined.
clients_info:
    client-vm1: &client1
        host: client-vm1
    client-vm2: &client2
        host: client-vm2
        super_user: 'root'
    client-vm3: &client3
        host: client-vm3
        super_user: 'Admin'
        platform: 'windows'
    client-vm4: &client4
        host: client-vm4
        super_user: 'Administrator'
        platform: 'windows'

# This section contains the details about the gluster volumes, mounts etc.
# Volumes, mounts and anything else related to gluster have to be defined
# under this section.
gluster:

    # This defines which volume types and mount protocols the current test
    # run will cover.
    running_on_volumes: []
    running_on_mounts: []
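    # As an illustration only (the values below are assumptions, not part of
    # the template), a run limited to two volume types and two mount
    # protocols could list keys from the 'volume_types' and 'mounts'
    # sections defined further below:
    ## running_on_volumes: ['distributed', 'distributed-replicated']
    ## running_on_mounts: ['glusterfs', 'nfs']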

    # This defines the server log dirs/files and client log dirs/files into
    # which any message required for debugging can be injected.
    server_gluster_logs_info:
        # Defaults: ["/var/log/glusterfs", "/var/log/samba"]
        dirs: []
        # Defaults: ["/var/log/ganesha.log", "/var/log/ganesha-gfapi.log"]
        files: []
    client_gluster_logs_info:
        # Defaults: ["/var/log/glusterfs"]
        dirs: []
        # Defaults: []
        files: []

    # This section defines the details about the 'nfs-ganesha' or 'samba'
    # clusters to be created. Define this section when setting up nfs-ganesha
    # or samba clusters.
    cluster_config:
        smb:
            enable: False
            users_info:
                'root':
                    password: 'foobar'
                    acl: ''
                'user1':
                    password: 'xyz'
                    acl: ''
                'user2':
                    password: 'abc'
                    acl: ''
            ctdb_setup: True
            ctdb_servers: []
            ctdb_vips:
                - vip: vip1
                  routing_prefix: '23'
                  interface: 'eth0'
                - vip: vip2
                  routing_prefix: '22'
                  interface: 'eth0'
            ctdb_metavol_brick_path: ''

        nfs_ganesha:
            enable: False
            num_of_nfs_ganesha_nodes: 4
            vips: []
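    # As an illustrative sketch only (the addresses and routing prefix below
    # are assumptions, not values from this template), a populated VIP list
    # for a 4-node nfs-ganesha cluster and a populated 'ctdb_vips' entry
    # might look like:
    ##  vips: ["10.70.44.121", "10.70.44.122", "10.70.44.123", "10.70.44.124"]
    ##  ctdb_vips:
    ##      - vip: 10.70.44.125
    ##        routing_prefix: '23'
    ##        interface: 'eth0'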

    # 'volume_types' defines the different volume types that can be created
    # in gluster. Default values are assigned to each volume type for running
    # the tests; these can be modified based on the test configuration.
    # Note: the 'keys' should remain the same.
    volume_types:
        distributed: &distributed
            type: distributed
            dist_count: 4
            transport: tcp
        replicated: &replicated
            type: replicated
            replica_count: 3
            arbiter_count: 1
            transport: tcp
        distributed-replicated: &distributed-replicated
            type: distributed-replicated
            dist_count: 2
            replica_count: 3
            transport: tcp
        dispersed: &dispersed
            type: dispersed
            disperse_count: 6
            redundancy_count: 2
            transport: tcp
        distributed-dispersed: &distributed-dispersed
            type: distributed-dispersed
            dist_count: 2
            disperse_count: 6
            redundancy_count: 2
            transport: tcp

    volume_create_force: False

    # Volume options that are applicable to all volume types.
    volume_options:
        ## performance.quick-read: "off"
        ## performance.read-ahead: "off"
        ## performance.io-cache: "off"
        ## performance.stat-prefetch: "off"
        ## performance.open-behind: "off"
        ## performance.readdir-ahead: "off"
        ## server.allow-insecure: "on"

    # If the volume is exported as an SMB share, set the following volume
    # options on the share.
    smb_share_options:
        ## group:
        ##     - metadata-cache
        ##     - nl-cache

    # If the volume is exported as an NFS-Ganesha export, set the following
    # volume options on the export.
    nfs_ganesha_export_options:

    # Slave volume configs go under the 'slave_volumes' section. This is a
    # list of all the slave volumes.
    slave_volumes:
        - &slave_vol1
          voltype: *distributed-replicated
          servers: [ server-vm5, server-vm6, server-vm7, server-vm8 ]
          extra_servers: []
          quota:
              enable: False
              limit_usage:
                  path: "/"
                  size: 100GB
                  percent:
              limit_objects:
                  path: "/"
                  number:
                  percent:
              alert_time:
              soft_timeout:
              hard_timeout:
          inode_quota:
              enable: False
          bitrot:
              enable: False
              scrub_throttle:
              scrub_frequency:

    # 'volumes' is the list of all the volumes that the testcases need to
    # refer to. Each item in the list holds the details of one volume.
    # A volume should have 'name', 'voltype' and 'servers' defined.
    volumes:
        - &vol1
          name: testvol
          voltype: *distributed-dispersed
          servers: [ server-vm1, server-vm2, server-vm3, server-vm4 ]

          # 'extra_servers': new servers to add to the cluster in case of
          # add-brick, replace-brick, attach-tier etc.
          extra_servers: [ server-vm9, server-vm10, server-vm11, server-vm12 ]

          # 'tier': tiering related info. Set 'create_tier' to 'True' to
          # attach a tier during volume setup when the test calls the
          # glusto-tests volume_setup for setting up the volume.
          # 'tier_type' defines the tier volume type.
          tier:
              create_tier: False
              tier_type: *distributed-replicated

          # 'quota': quota related info. Setting 'enable' to 'True' enables
          # quota during volume setup when the test calls the glusto-tests
          # volume_setup for setting up the volume.
          # Other quota details like 'limit_usage', 'limit_objects',
          # 'alert_time', 'soft_timeout' and 'hard_timeout' can be defined in
          # this section and referred to in testcases using 'g.config'.
          quota:
              enable: False
              limit_usage:
                  path: "/"
                  size: 100GB
                  percent:
              limit_objects:
                  path: "/"
                  number:
                  percent:
              alert_time: 0
              soft_timeout: 0
              hard_timeout: 0
          inode_quota:
              enable: False

          # 'bitrot': bitrot related info. Setting 'enable' to 'True' enables
          # bitrot during volume setup when the test calls the glusto-tests
          # volume_setup for setting up the volume.
          # Other bitrot options like 'scrub_throttle', 'scrub_frequency' and
          # any other bitrot related info can be defined here and referred to
          # in testcases using 'g.config'.
          bitrot:
              enable: False
              scrub_throttle: 'aggressive'
              scrub_frequency: 'hourly'

          # 'options': volume options and their values to be set during
          # volume setup when the test calls the glusto-tests volume_setup
          # for setting up the volume.
          options:
              performance.readdir-ahead: "on"

          # 'snapshot': snapshot related info.
          snapshot:
              use_snapshot: True
              snap_jobname: 'snap_job'
              snap_schedule: 2

          # 'uss': USS related info. Setting 'enable' to 'True' enables USS
          # during volume setup when the test calls the glusto-tests
          # volume_setup for setting up the volume.
          uss:
              enable: False

          # 'smb': setting 'enable' to 'True' exports the created volume as
          # an 'SMB' share.
          smb:
              enable: False

          nfs_ganesha:
              enable: False

          geo_rep:
              create_geo_rep_session: False
              slave_volumes: [ *slave_vol1 ]
              user: 'root'
              group: 'geogroup'
              sync_mode: 'rsync'

    # 'mounts' is the list of all mount points. Each item in the list holds
    # the details of one mountpoint, i.e. 'protocol', 'server', 'volname',
    # 'client' info ('host', 'super_user', 'platform'), 'mountpoint' and
    # mount 'options'. For cifs/smb mounts, 'smbuser' and 'smbpasswd' are
    # also required.
    mounts:
        - &mount1
          protocol: 'glusterfs'
          server: server-vm1
          volname: testvol
          client: *client1
          mountpoint: ''
          options: ''
        - &mount2
          protocol: 'nfs'
          server: server-vm1
          volname: testvol
          client:
              host: client-vm2
          mountpoint: ''
          options: ''
          num_of_mounts: 1
        - &mount3
          protocol: 'smb'
          server: server-vm2
          volname: testvol
          client: *client3
          mountpoint: ''
          options: ''
          smbuser: 'user1'
          smbpasswd: 'xyz'
          num_of_mounts: 3
        - &mount4
          protocol: 'smb'
          server: server-vm4
          volname: testvol
          client:
              host: client-vm4
              super_user: 'Administrator'
              platform: 'windows'
          mountpoint: ''
          options: ''
          smbuser: 'user2'
          smbpasswd: 'abc'
        - &mount5
          protocol: 'cifs'
          server: server-vm1
          volname: testvol
          client:
              host: client-vm2
          mountpoint: ''
          options: ''
          smbuser: 'user2'
          smbpasswd: 'abc'
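    # As an illustrative sketch only (the mountpoint and options below are
    # assumptions, not values from this template), a fully filled-in NFS
    # mount entry might look like:
    ##  - protocol: 'nfs'
    ##    server: server-vm1
    ##    volname: testvol
    ##    client:
    ##        host: client-vm2
    ##    mountpoint: '/mnt/testvol_nfs'
    ##    options: 'vers=3'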