# GlusterFS test-harness configuration template.
# Read via Python configparser: keys in [DEFAULT] act as fallbacks for every
# other section and can be overridden inside any individual section.

[DEFAULT]
user =
password =
glusterversion =
installpath =
downloadpath =

# ExportDir section.
# Necessary options: dir
# Optional: fstype, device, options
[export1]
dir =
fstype =
device =
options =

[export2]
dir =
fstype =
device =

# Server section.
# Necessary options: hostname, username, password, glusterversion.
# The username, password and glusterversion values default from the
# [DEFAULT] section and can be overridden per server.
# NOTE(review): [DEFAULT] defines the key 'user', not 'username' — confirm
# which key name the harness actually reads.
# Optional: installpath
[server1]
hostname =

[server2]
hostname =

# Brick section.
# Necessary options: hostname, path
# hostname/path refer to entries in the Server and ExportDir sections above.
[brick1]
hostname = server1.hostname
path = export1

[brick2]
hostname = server2.hostname
path = export2

# Volume section.
# Necessary options: volumename, bricks
# Optional: replica, stripe
[volume1]
volumename = replicate
replica = 2
stripe =
transporttype = tcp
# Comma-separated list of brick section names.
bricks = brick1, brick2

# Client section.
# Necessary options: hostname, username, password, glusterversion.
# The username, password and glusterversion values default from the
# [DEFAULT] section and can be overridden per client.
# Optional: installpath
[client1]
hostname =

# MountDevice section.
# Necessary options: hostname, volume
# server1.hostname could also be a VIP; it need not be a server hostname.
# In the general case (without NFS) the first server from the server pool is
# used. The volume specified here is the "active_volume" that all clients
# mount. Both the active volume and the hostname can be changed during a
# test run.
[mountdevice1]
hostname = server1.hostname
volumename = volume1.volumename

# Mount section.
# addMount(dir, type, client, device=master.volume, logfile=None)
[mount1]
dir =
client =
device =
type =
logfile =
options =

# Geo-replication section.
# addSlave()
[slave1]
hostname =
path =
volumename =