# 'ocp_servers' section provides info about the OCP master, client and
# worker nodes.
# This section has to be defined.
ocp_servers:
  master:
    master_node1_ip:
      hostname: master_node1_hostname
  client:
    client_node1_ip:
      hostname: client_node1_hostname
  nodes:
    ocp_node1_ip:
      hostname: ocp_node1_hostname
    ocp_node2_ip:
      hostname: ocp_node2_hostname

# 'gluster_servers' section covers the details of the nodes where gluster
# servers are run. In the case of CNS, these are the nodes where gluster
# pods are run. In the case of CRS, these are the nodes where gluster
# servers are configured.
# This section has to be defined.
gluster_servers:
  gluster_server1_ip:
    manage: gluster_server1_hostname
    storage: gluster_server1_ip
    # 'additional_devices' option is used by a couple of test cases
    # which test the Heketi device "add" operation.
    additional_devices: [device3, device4]
  gluster_server2_ip:
    manage: gluster_server2_hostname
    storage: gluster_server2_ip
    additional_devices: [device3, device4]
  gluster_server3_ip:
    manage: gluster_server3_hostname
    storage: gluster_server3_ip
    additional_devices: [device3, device4]

# 'gluster_registry_servers' section covers the details of the nodes where
# gluster registry servers are run. In the case of CNS, these are the nodes
# where gluster pods are run under the registry namespace. In the case of
# CRS, these are the nodes where the registry gluster servers are configured.
gluster_registry_servers:
  gluster_registry_server1_ip:
    manage: gluster_registry_server1_hostname
    storage: registry_server1_ip
  gluster_registry_server2_ip:
    manage: gluster_registry_server2_hostname
    storage: registry_server2_ip
  gluster_registry_server3_ip:
    manage: gluster_registry_server3_hostname
    storage: registry_server3_ip

# 'additional_gluster_servers' section covers the details of the nodes where
# we can run gluster servers. In the case of CNS, these are the nodes where
# gluster pods can be run. In the case of CRS, these are the nodes where
# gluster servers can be configured.
# Note: These nodes are not part of the gluster pool, but they can be added
# to it. They are meant for test cases that perform node add and remove
# operations.
additional_gluster_servers:
  gluster_server4_ip:
    manage: gluster_server4_hostname
    storage: gluster_server4_ip
    devices: [device1, device2]

openshift:
  storage_project_name: ""
  registry_project_name: ""
  heketi_config:
    heketi_dc_name: ""
    heketi_service_name: ""
    heketi_client_node: ""
    heketi_server_url: ""
    heketi_cli_user: ""
    heketi_cli_key: ""
  registry_heketi_config:
    heketi_server_url: ""
  dynamic_provisioning:
    storage_classes:
      file_storage_class:
        # 'provisioner' option is no longer supported.
        # It gets autocalculated.
        resturl: ""
        restuser: ""
        secretnamespace: ""
        volumenameprefix: "autotests-file"
      block_storage_class:
        # 'provisioner' option is no longer supported.
        # It gets autocalculated.
        resturl: ""
        restuser: ""
        restsecretnamespace: ""
        hacount: "3"
        chapauthenabled: "true"
        volumenameprefix: "autotests-block"
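# For reference, the storage-class options above roughly correspond to the
# 'parameters' of the OCP StorageClass objects the tests create; the
# provisioner is chosen automatically (typically kubernetes.io/glusterfs for
# file volumes and gluster.org/glusterblock for block volumes). A minimal,
# hypothetical sketch of such an object (the name, URL and secret references
# below are placeholders, not framework defaults):
#
#   apiVersion: storage.k8s.io/v1
#   kind: StorageClass
#   metadata:
#     name: autotests-file-sc
#   provisioner: kubernetes.io/glusterfs
#   parameters:
#     resturl: "http://heketi-storage-glusterfs.example.com"
#     restuser: "admin"
#     secretnamespace: "glusterfs"
#     secretname: "heketi-storage-admin-secret"
#     volumenameprefix: "autotests-file"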
resturl: "" restuser: "" restsecretnamespace: "" hacount: "3" chapauthenabled: "true" volumenameprefix: "autotests-block" metrics: metrics_project_name: "" metrics_rc_hawkular_cassandra: "" metrics_rc_hawkular_metrics: "" metrics_rc_heapster: "" # 'logging' section covers the details of resources related to OCP logging logging: logging_project_name: "" logging_fluentd_ds: "" logging_es_dc: "" # 'prometheus' section covers the details of resources related to # prometheus prometheus: prometheus_project_name: "" prometheus_resources_selector: "" alertmanager_resources_selector: "" # 'io_container_images' section covers the details of container images # used for I/O io_container_images: cirros: quay.io/libpod/cirros busybox: quay.io/prometheus/busybox common: allow_heketi_zones_update: False check_heketi_db_inconsistencies: True stop_on_first_failure: False heketi_command_timeout: 120 heketi_logs_before_delete: False scaleup: run_scale_up_on_start: True skip_cleanup: True file_pvc_count: 500 file_pod_count: 100 arbiter_pvc_count: 200 arbiter_pod_count: 100 block_pvc_count: 200 block_pod_count: 100 cloud_provider: name: '' vmware: hostname: '' username: '' password: '' port: 443 aws: # To be done in future libvirt: # To be done in future