blob: c99e9c143ea9afddec906404752d161130a3bb9d (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
|
# 'ocp_servers' is info about ocp master, client and worker nodes.
# This section has to be defined.
# Structure: role -> node IP -> per-node attributes (hostname).
# NOTE(review): indentation was stripped in the extracted copy; nesting
# below is reconstructed from the role/ip/hostname key semantics.
ocp_servers:
  master:
    master_node1_ip:
      hostname: master_node1_hostname
  client:
    client_node1_ip:
      hostname: client_node1_hostname
  nodes:
    ocp_node1_ip:
      hostname: ocp_node1_hostname
    ocp_node2_ip:
      hostname: ocp_node2_hostname
# 'gluster_servers' section covers the details of the nodes where gluster
# servers are run. In the case of CNS, these are the nodes where gluster
# pods are run. In the case of CRS, these are the nodes where gluster
# servers are configured.
# This section has to be defined.
# Structure: node IP -> {manage, storage, additional_devices}.
# NOTE(review): indentation was stripped in the extracted copy; nesting
# below is reconstructed from the per-server attribute keys.
gluster_servers:
  gluster_server1_ip:
    manage: gluster_server1_hostname
    storage: gluster_server1_ip
    # 'additional_devices' option is used for couple of test cases
    # which test Heketi device "add" operation.
    additional_devices: [device3, device4]
  gluster_server2_ip:
    manage: gluster_server2_hostname
    storage: gluster_server2_ip
    additional_devices: [device3, device4]
  gluster_server3_ip:
    manage: gluster_server3_hostname
    storage: gluster_server3_ip
    additional_devices: [device3, device4]
# 'additional_gluster_servers' section covers the details of the nodes where
# we can run gluster servers. In the case of CNS, these are the nodes where
# gluster pods can be run. In the case of CRS, these are the nodes where
# gluster servers can be configured.
# Note: These nodes are not part of gluster pool. These can be added into
# gluster pool. These can be used where we want to perform node add and remove
# operations.
# NOTE(review): indentation was stripped in the extracted copy; nesting
# below is reconstructed to match the 'gluster_servers' section layout.
additional_gluster_servers:
  gluster_server4_ip:
    manage: gluster_server4_hostname
    storage: gluster_server4_ip
    devices: [device1, device2]
# 'openshift' section holds the OpenShift-side configuration: the storage
# project, how to reach the Heketi service, and the storage classes used
# for dynamic provisioning tests.
# NOTE(review): indentation was stripped in the extracted copy;
# 'heketi_config' and 'dynamic_provisioning' are reconstructed as children
# of 'openshift' based on their Heketi/storage-class semantics — confirm
# against the consuming test framework's schema.
openshift:
  storage_project_name: "<storage-project-name>"
  heketi_config:
    heketi_dc_name: "<fake-name-of-heketi-deployment-config>"
    heketi_service_name: "<fake-name-of-heketi-service>"
    heketi_client_node: "<node-ip-with-heketi-client>"
    heketi_server_url: "<fake-heketi-server-url>"
    heketi_cli_user: "<fake-heketi-cli-user>"
    heketi_cli_key: "<fake-heketi-cli-secret>"
  dynamic_provisioning:
    storage_classes:
      file_storage_class:
        provisioner: "kubernetes.io/glusterfs"
        resturl: "<fake-url>"
        restuser: "<fake-user>"
        secretnamespace: "<fake-namespace-name>"
        volumenameprefix: "autotests-file"
      block_storage_class:
        provisioner: "gluster.org/glusterblock"
        resturl: "<fake-url>"
        restuser: "<fake-user>"
        restsecretnamespace: "<fake-namespace-name>"
        # 'hacount' and 'chapauthenabled' stay quoted strings: storage-class
        # parameters are passed to the provisioner as strings, not typed values.
        hacount: "3"
        chapauthenabled: "true"
        volumenameprefix: "autotests-block"
# 'common' section holds framework-wide toggles and timeouts.
common:
  # Booleans written in canonical lowercase YAML form (was 'False', which
  # parses as boolean under YAML 1.1 but violates yamllint's 'truthy' rule).
  allow_heketi_zones_update: false
  stop_on_first_failure: false
  # Timeout in seconds for heketi-cli commands — presumably seconds; TODO confirm.
  heketi_command_timeout: 120
# 'cloud_provider' section selects and configures the virtualization backend
# used for node power operations. 'name' picks which provider sub-section
# applies; only 'vmware' is configurable today.
# NOTE(review): indentation was stripped in the extracted copy; provider
# sub-sections are reconstructed as children of 'cloud_provider'.
cloud_provider:
  name: '<fake-cloud-provider-name eg. vmware>'
  vmware:
    hostname: '<fake-hostname>'
    username: '<fake-username>'
    password: '<fake-password>'
    port: 443
  aws:  # To be done in future (empty value parses as null placeholder)
  libvirt:  # To be done in future (empty value parses as null placeholder)
|