---
# 'ocp_servers' is info about ocp master, client and worker nodes.
# This section has to be defined.
ocp_servers:
  master:
    master_node1_ip:
      hostname: master_node1_hostname
  client:
    client_node1_ip:
      hostname: client_node1_hostname
  nodes:
    ocp_node1_ip:
      hostname: ocp_node1_hostname
    ocp_node2_ip:
      hostname: ocp_node2_hostname
# 'gluster_servers' section covers the details of the nodes where gluster
# servers are run. In the case of CNS, these are the nodes where gluster
# pods are run. In the case of CRS, these are the nodes where gluster
# servers are configured.
# This section has to be defined.
gluster_servers:
  gluster_server1_ip:
    manage: gluster_server1_hostname
    storage: gluster_server1_ip
    # 'additional_devices' option is used for couple of test cases
    # which test Heketi device "add" operation.
    additional_devices: [device3, device4]
  gluster_server2_ip:
    manage: gluster_server2_hostname
    storage: gluster_server2_ip
    additional_devices: [device3, device4]
  gluster_server3_ip:
    manage: gluster_server3_hostname
    storage: gluster_server3_ip
    additional_devices: [device3, device4]
# 'gluster_registry_servers' section covers the details of the nodes where
# gluster registry servers are run. In the case of CNS, these are the nodes
# where gluster pods are run under the registry namespace. In the case of CRS,
# these are the nodes where registry servers are configured.
gluster_registry_servers:
  gluster_registry_server1_ip:
    manage: gluster_registry_server1_hostname
    storage: registry_server1_ip
  gluster_registry_server2_ip:
    manage: gluster_registry_server2_hostname
    storage: registry_server2_ip
  gluster_registry_server3_ip:
    manage: gluster_registry_server3_hostname
    storage: registry_server3_ip
# 'additional_gluster_servers' section covers the details of the nodes where
# we can run gluster servers. In the case of CNS, these are the nodes where
# gluster pods can be run. In the case of CRS, these are the nodes where
# gluster servers can be configured.
# Note: These nodes are not part of gluster pool. These can be added into
# gluster pool. These can be used where we want to perform node add and remove
# operations.
additional_gluster_servers:
  gluster_server4_ip:
    manage: gluster_server4_hostname
    storage: gluster_server4_ip
    devices: [device1, device2]
# 'openshift' section covers the OpenShift-side settings used by the tests:
# project (namespace) names, Heketi access details, and the parameters of the
# dynamically provisioned file/block StorageClasses.
openshift:
  storage_project_name: "<storage-project-name>"
  registry_project_name: "<registry-project-name>"
  heketi_config:
    heketi_dc_name: "<fake-name-of-heketi-deployment-config>"
    heketi_service_name: "<fake-name-of-heketi-service>"
    heketi_client_node: "<node-ip-with-heketi-client>"
    heketi_server_url: "<fake-heketi-server-url>"
    heketi_cli_user: "<fake-heketi-cli-user>"
    heketi_cli_key: "<fake-heketi-cli-secret>"
  registry_heketi_config:
    heketi_server_url: "<fake-heketi-server-url>"
  dynamic_provisioning:
    storage_classes:
      file_storage_class:
        # 'provisioner' option is no longer supported.
        # It gets autocalculated.
        resturl: "<fake-url>"
        restuser: "<fake-user>"
        secretnamespace: "<fake-namespace-name>"
        volumenameprefix: "autotests-file"
      block_storage_class:
        # 'provisioner' option is no longer supported.
        # It gets autocalculated.
        resturl: "<fake-url>"
        restuser: "<fake-user>"
        restsecretnamespace: "<fake-namespace-name>"
        hacount: "3"
        chapauthenabled: "true"
        volumenameprefix: "autotests-block"
  # NOTE(review): nesting of 'metrics' under 'openshift' was reconstructed
  # from context (source indentation was lost) — confirm against the consumer.
  metrics:
    metrics_project_name: "<metrics-project-name>"
    metrics_rc_hawkular_cassandra: "<hawkular-cassandra-rc-name>"
    metrics_rc_hawkular_metrics: "<hawkular-metrics-rc-name>"
    metrics_rc_heapster: "<heapster-rc-name>"
# 'common' section holds misc test-run switches and timeouts.
common:
  # Canonical lowercase booleans used below ('true'/'false') — YAML 1.1
  # parsers resolve 'True'/'False' to the same values, so behavior is kept.
  allow_heketi_zones_update: false
  check_heketi_db_inconsistencies: true
  stop_on_first_failure: false
  # Timeout (seconds) applied to heketi-cli command invocations.
  heketi_command_timeout: 120
# 'scaleup' section controls the scale-up workload: how many PVCs/pods of
# each kind (file, arbiter, block) to create.
# NOTE(review): top-level placement reconstructed (source indentation was
# lost) — confirm this key is not expected under 'common'.
scaleup:
  run_scale_up_on_start: true
  skip_cleanup: true
  file_pvc_count: 500
  file_pod_count: 100
  arbiter_pvc_count: 200
  arbiter_pod_count: 100
  block_pvc_count: 200
  block_pod_count: 100
# 'cloud_provider' section covers the virtualization/cloud backend used to
# manage the test node VMs. Only the provider named in 'name' needs its
# sub-section filled in.
cloud_provider:
  name: '<fake-cloud-provider-name eg. vmware>'
  vmware:
    hostname: '<fake-hostname>'
    username: '<fake-username>'
    password: '<fake-password>'
    port: 443
  # Bare keys below intentionally carry no value (parsed as null) — they are
  # placeholders for providers not implemented yet.
  aws:  # To be done in future
  libvirt:  # To be done in future