author    Valerii Ponomarov <vponomar@redhat.com>    2019-02-07 02:08:23 +0530
committer Valerii Ponomarov <vponomar@redhat.com>    2019-02-07 02:36:02 +0530
commit    25fcd9c5aa4c360eff19ef08fc4e2bdff6147ffd (patch)
tree      544cf09479861ee7c434a7f9ece19167c14ddf35 /deployment/playbooks/roles
parent    a6c7dead0d6ddad4dae93a4292891617b50b44a0 (diff)
Add end-to-end OCP 'deployment' functionality
Add an end-to-end deployment tool for OpenShift and OpenShift Container Storage on top of VMware. The added code is a modified version of the 'reference-architecture/vmware-ansible' dir from the following repo: https://github.com/vponomaryov/openshift-ansible-contrib

Read the 'deployment/README.rst' file for more details about the deployment tool.

Change-Id: Ic96f252ff786cc1ecf24d27f0ec47e324131e41b
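The roles below are driven by a shared set of variables (vCenter access, VM sizing, storage disks). As a hedged illustration only (every value here is hypothetical; the authoritative variable reference is 'deployment/README.rst'), a minimal vars file for these playbooks might look like:

    ---
    vcenter_host: vcenter.example.com
    vcenter_username: administrator@vsphere.local
    vcenter_password: secret
    vcenter_datacenter: dc1
    vcenter_cluster: cluster1
    vcenter_resource_pool: ocp-pool
    vcenter_datastore: datastore1
    vcenter_folder: ocp-vms
    vcenter_template_name: rhel7-template
    vm_network: "VM Network"
    dns_zone: example.com
    cluster_id: ocp
    container_storage: cns                      # or 'crs'
    container_storage_disk_type: thin
    container_storage_disks: "300,300"          # GB, comma-separated
    additional_disks_to_storage_nodes: "100"    # GB, comma-separated
    cns_automation_config_file_path: ""         # optional tests config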
Diffstat (limited to 'deployment/playbooks/roles')
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml | 13
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2 | 11
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml | 3
-rw-r--r--  deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml | 8
-rw-r--r--  deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml | 142
-rw-r--r--  deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml | 143
-rw-r--r--  deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml | 157
-rw-r--r--  deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml | 66
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml | 7
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml | 34
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 | 4
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 | 7
-rw-r--r--  deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml | 15
-rw-r--r--  deployment/playbooks/roles/etcd-storage/tasks/main.yaml | 24
-rw-r--r--  deployment/playbooks/roles/gluster-ports/defaults/main.yaml | 3
-rw-r--r--  deployment/playbooks/roles/gluster-ports/tasks/main.yaml | 34
-rw-r--r--  deployment/playbooks/roles/instance-groups/tasks/main.yaml | 155
-rw-r--r--  deployment/playbooks/roles/master-prerequisites/tasks/main.yaml | 6
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml | 5
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml | 27
-rw-r--r--  deployment/playbooks/roles/package-repos/tasks/main.yaml | 23
-rw-r--r--  deployment/playbooks/roles/prerequisites/defaults/main.yaml | 6
l---------  deployment/playbooks/roles/prerequisites/library/openshift_facts.py | 1
-rw-r--r--  deployment/playbooks/roles/prerequisites/library/rpm_q.py | 72
-rw-r--r--  deployment/playbooks/roles/prerequisites/tasks/main.yaml | 84
-rw-r--r--  deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml | 14
-rw-r--r--  deployment/playbooks/roles/rhsm/defaults/main.yaml | 5
-rw-r--r--  deployment/playbooks/roles/rhsm/tasks/main.yaml | 49
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml | 83
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml | 29
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/tasks/main.yaml | 22
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2 | 8
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml | 6
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml | 77
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2 | 19
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml | 3
-rw-r--r--  deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml | 44
37 files changed, 1409 insertions, 0 deletions
diff --git a/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
new file mode 100644
index 00000000..1b93ce22
--- /dev/null
+++ b/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+- name: create /etc/origin/cloudprovider
+ file:
+ state: directory
+ path: "{{ vsphere_conf_dir }}"
+
+- name: create the vsphere.conf file
+ template:
+ src: "{{ role_path }}/templates/vsphere.conf.j2"
+ dest: /etc/origin/cloudprovider/vsphere.conf
+ owner: root
+ group: root
+ mode: 0644
diff --git a/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2 b/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
new file mode 100644
index 00000000..8abe6e8c
--- /dev/null
+++ b/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
@@ -0,0 +1,11 @@
+[Global]
+user = "{{ vcenter_username }}"
+password = "{{ vcenter_password }}"
+server = "{{ vcenter_host }}"
+port = 443
+insecure-flag = 1
+datacenter = {{ vcenter_datacenter }}
+datastore = {{ vcenter_datastore }}
+working-dir = /{{ vcenter_datacenter }}/vm/{{ vcenter_folder }}/
+[Disk]
+scsicontrollertype = pvscsi
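For reference, substituting the hypothetical values from the vars sketch above, this template renders to an INI-style vsphere.conf along these lines:

    [Global]
    user = "administrator@vsphere.local"
    password = "secret"
    server = "vcenter.example.com"
    port = 443
    insecure-flag = 1
    datacenter = dc1
    datastore = datastore1
    working-dir = /dc1/vm/ocp-vms/
    [Disk]
    scsicontrollertype = pvscsi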
diff --git a/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
new file mode 100644
index 00000000..81511c01
--- /dev/null
+++ b/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
@@ -0,0 +1,3 @@
+---
+vsphere_conf_dir: /etc/origin/cloudprovider
+vsphere_conf: "{{ vsphere_conf_dir }}/vsphere.conf"
diff --git a/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..392b5da1
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: Add the following nodes to the 'new_nodes' group
+ set_fact:
+ is_add_nodes: true
+
+- name: Import common node creation role
+ import_role:
+ name: create-vm-prod-ose
diff --git a/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..e01f1dd0
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
@@ -0,0 +1,142 @@
+---
+- name: Define set of main disks (system and heketi)
+ set_fact:
+ disks_info: "{{ disks_info | default([
+ {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
+ }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ container_storage_disks.split(',') }}"
+
+- name: Define set of additional disks which will just be attached to the nodes
+ set_fact:
+ additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
+
+- name: Create CNS production VMs on vCenter
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: "{{ item.value.guestname }}"
+ cluster: "{{ vcenter_cluster}}"
+ datacenter: "{{ vcenter_datacenter }}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ template: "{{vcenter_template_name}}"
+ state: poweredon
+ wait_for_ip_address: true
+ folder: "/{{ vcenter_folder }}"
+ annotation: "{{ item.value.tag }}"
+ disk: "{{ disks_info }} + {{ additional_disks_info }}"
+ hardware:
+ memory_mb: 32768
+ networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
+ customization:
+ domain: "{{dns_zone}}"
+ dns_suffix: "{{dns_zone}}"
+ hostname: "{{ item.value.guestname}}"
+ with_dict: "{{host_inventory}}"
+ when: "item.value.guesttype in ['cns', ]"
+ async: "{{ 6 * 600 }}"
+ poll: 0
+ register: async_vms_creation
+
+- name: Check async status of VMs creation
+ async_status:
+ jid: "{{ async_result_item.ansible_job_id }}"
+ with_items: "{{ async_vms_creation.results }}"
+ loop_control:
+ loop_var: "async_result_item"
+ register: async_poll_results
+ until: async_poll_results.finished
+ retries: "{{ 6 * 100 }}"
+
+- name: Read info of newly created VMs
+ vmware_guest_tools_wait:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ vcenter_folder }}"
+ validate_certs: False
+ uuid: "{{ item.instance.hw_product_uuid }}"
+ with_items: "{{ async_poll_results.results }}"
+ register: facts
+
+- name: Map node names and their IP addresses
+ set_fact:
+ ip4addrs: "{{ ip4addrs | default({}) | combine(
+ {item.instance.hw_name: (
+ item.instance.hw_eth0.ipaddresses | ipv4 | first)},
+ recursive=True) }}"
+ hostnames_for_reboot: "{{
+ (hostnames_for_reboot | default([])) +
+ [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
+ with_items: "{{ facts.results }}"
+
+- name: Define glusterfs devices
+ set_fact:
+ glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
+ {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
+ with_indexed_items: "{{ disks_info[3::] }}"
+
+- name: Define glusterfs additional devices
+ set_fact:
+ glusterfs_additional_devices: "{{
+ glusterfs_additional_devices | default([])
+ }} + {{
+ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
+ }}"
+ with_indexed_items: "{{ additional_disks_info }}"
+
+- name: Add CNS production VMs to inventory
+ add_host:
+ hostname: "{{ item.value.guestname }}"
+ ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
+ ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
+ groups: "{{ item.value.tag }}, new_nodes, storage, cns, glusterfs"
+ openshift_node_group_name: "node-config-storage"
+ # Following vars are for 'openshift_storage_glusterfs' role from
+ # 'openshift/openshift-ansible' repo
+ glusterfs_devices: "{{ glusterfs_devices }}"
+ glusterfs_hostname: "{{ item.value.guestname }}"
+ glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
+ glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['cns', ]"
+
+# The following tasks update the config file which is required for
+# automated tests from the 'glusterfs-containers-tests' repo
+
+- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
+ set_fact:
+ gluster_servers: "{{
+ gluster_servers | default({}) | combine({
+ ip4addrs[item.value.guestname]: {
+ 'manage': item.value.guestname,
+ 'storage': ip4addrs[item.value.guestname],
+ 'additional_devices': glusterfs_additional_devices,
+ }
+ })
+ }}"
+ with_dict: "{{ host_inventory }}"
+ when:
+ - item.value.guesttype in ['cns', ]
+ - cns_automation_config_file_path | length > 0
+
+- name: Update 'glusterfs-containers-tests' config file
+ yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: gluster_servers
+ value: "{{ gluster_servers }}"
+ when: gluster_servers is defined
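A note on the set_fact arithmetic above: the quoted "{{ list }} + {{ list }}" expressions are evaluated by Jinja2 as list concatenation on each loop iteration, and disk indexes map to guest device letters starting at /dev/sdd (the first three disks are the system and heketi ones). A sketch, assuming the hypothetical container_storage_disks="300,300" and additional_disks_to_storage_nodes="100" from the vars example above:

    # disks_info sizes after the loop:       [60, 40, 40, 300, 300]
    # additional_disks_info sizes:           [100]
    # glusterfs_devices:                     ['/dev/sdd', '/dev/sde']
    # glusterfs_additional_devices:          ['/dev/sdf']   (offset by len(glusterfs_devices))
    # glusterfs_zone for IP 192.168.12.34:   1234           (last two octets joined)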
diff --git a/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..05aa63bb
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
@@ -0,0 +1,143 @@
+---
+- name: Define set of main disks (system and heketi)
+ set_fact:
+ disks_info: "{{ disks_info | default([
+ {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
+ }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ container_storage_disks.split(',') }}"
+
+- name: Define set of additional disks which will just be attached to the nodes
+ set_fact:
+ additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
+
+- name: Create CRS production VMs on vCenter
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: "{{ item.value.guestname }}"
+ cluster: "{{ vcenter_cluster}}"
+ datacenter: "{{ vcenter_datacenter }}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ template: "{{vcenter_template_name}}"
+ state: poweredon
+ wait_for_ip_address: true
+ folder: "/{{ vcenter_folder }}"
+ annotation: "{{ cluster_id }}-crs"
+ disk: "{{ disks_info }} + {{ additional_disks_info }}"
+ hardware:
+ memory_mb: 32768
+ networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
+ customization:
+ domain: "{{dns_zone}}"
+ dns_suffix: "{{dns_zone}}"
+ hostname: "{{ item.value.guestname}}"
+ with_dict: "{{host_inventory}}"
+ when: "item.value.guesttype in ['crs', ]"
+ async: "{{ 6 * 600 }}"
+ poll: 0
+ register: async_vms_creation
+
+- name: Check async status of VMs creation
+ async_status:
+ jid: "{{ async_result_item.ansible_job_id }}"
+ with_items: "{{ async_vms_creation.results }}"
+ loop_control:
+ loop_var: "async_result_item"
+ register: async_poll_results
+ until: async_poll_results.finished
+ retries: "{{ 6 * 100 }}"
+
+- name: Read info of newly created VMs
+ vmware_guest_tools_wait:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ vcenter_folder }}"
+ validate_certs: False
+ uuid: "{{ item.instance.hw_product_uuid }}"
+ with_items: "{{ async_poll_results.results }}"
+ register: facts
+
+- name: Map node names and their IP addresses
+ set_fact:
+ ip4addrs: "{{ ip4addrs | default({}) | combine(
+ {item.instance.hw_name: (
+ item.instance.hw_eth0.ipaddresses | ipv4 | first)},
+ recursive=True) }}"
+ hostnames_for_reboot: "{{
+ (hostnames_for_reboot | default([])) +
+ [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
+ with_items: "{{ facts.results }}"
+
+- name: Define glusterfs devices
+ set_fact:
+ glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
+ {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
+ with_indexed_items: "{{ disks_info[3::] }}"
+
+- name: Define glusterfs additional devices
+ set_fact:
+ glusterfs_additional_devices: "{{
+ glusterfs_additional_devices | default([])
+ }} + {{
+ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
+ }}"
+ with_indexed_items: "{{ additional_disks_info }}"
+
+- name: Add CRS production VMs to inventory
+ add_host:
+ hostname: "{{ item.value.guestname }}"
+ ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
+ ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
+ openshift_node_group_name: "node-config-storage"
+ # old groups are: crs, production_group, {{cluster-id}}-crs
+ groups: "{{ cluster_id }}-crs, crs, storage, glusterfs"
+ # Following vars are for 'openshift_storage_glusterfs' role from
+ # 'openshift/openshift-ansible' repo
+ glusterfs_devices: "{{ glusterfs_devices }}"
+ glusterfs_hostname: "{{ item.value.guestname }}"
+ glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
+ glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['crs', ]"
+
+# The following tasks update the config file which is required for
+# automated tests from the 'glusterfs-containers-tests' repo
+
+- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
+ set_fact:
+ gluster_servers: "{{
+ gluster_servers | default({}) | combine({
+ ip4addrs[item.value.guestname]: {
+ 'manage': item.value.guestname,
+ 'storage': ip4addrs[item.value.guestname],
+ 'additional_devices': glusterfs_additional_devices,
+ }
+ })
+ }}"
+ with_dict: "{{ host_inventory }}"
+ when:
+ - item.value.guesttype in ['crs', ]
+ - cns_automation_config_file_path | length > 0
+
+- name: Update 'glusterfs-containers-tests' config file
+ yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: gluster_servers
+ value: "{{ gluster_servers }}"
+ when: gluster_servers is defined
diff --git a/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..a0124348
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
@@ -0,0 +1,157 @@
+---
+- name: Determine whether the following nodes need to be added to the "new_nodes" group
+ set_fact:
+ is_add_nodes: "{{ is_add_nodes | default(false) }}"
+
+- name: Define memory and disk parameters per node type
+ set_fact:
+ host_data:
+ master:
+ memory: 16384
+ disk:
+ - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ compute:
+ memory: "{{ ('cns' in container_storage) | ternary(32768, 8192) }}"
+ disk:
+ - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+
+- name: Create production VMs on vCenter
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: "{{ item.value.guestname }}"
+ cluster: "{{ vcenter_cluster}}"
+ datacenter: "{{ vcenter_datacenter }}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ template: "{{vcenter_template_name}}"
+ state: poweredon
+ wait_for_ip_address: true
+ folder: "/{{ vcenter_folder }}"
+ annotation: "{{ item.value.tag }}"
+ disk: "{{ host_data[item.value.guesttype].disk }}"
+ hardware:
+ memory_mb: "{{ host_data[item.value.guesttype].memory }}"
+ networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
+ customization:
+ domain: "{{dns_zone}}"
+ dns_suffix: "{{ dns_zone }}"
+ hostname: "{{ item.value.guestname }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['compute', 'master']"
+ async: "{{ 6 * 600 }}"
+ poll: 0
+ register: async_vms_creation
+
+- name: Check async status of VMs creation
+ async_status:
+ jid: "{{ async_result_item.ansible_job_id }}"
+ with_items: "{{ async_vms_creation.results }}"
+ loop_control:
+ loop_var: "async_result_item"
+ register: async_poll_results
+ until: async_poll_results.finished
+ retries: "{{ 6 * 100 }}"
+
+- name: Read info of newly created VMs
+ vmware_guest_tools_wait:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ vcenter_folder }}"
+ validate_certs: False
+ uuid: "{{ item.instance.hw_product_uuid }}"
+ with_items: "{{ async_poll_results.results }}"
+ register: facts
+
+- name: Map node names and their IP addresses
+ set_fact:
+ ip4addrs: "{{ ip4addrs | default({}) | combine(
+ {item.instance.hw_name: (
+ item.instance.hw_eth0.ipaddresses | ipv4 | first)},
+ recursive=True) }}"
+ hostnames_for_reboot: "{{
+ (hostnames_for_reboot | default([])) +
+ [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
+ with_items: "{{ facts.results }}"
+
+- name: Add production VMs to inventory
+ add_host:
+ hostname: "{{ item.value.guestname }}"
+ ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
+ ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
+ groups: "{{ item.value.tag }}, production_group{{ is_add_nodes | ternary(', new_nodes', '')}}"
+ openshift_node_group_name: "{{
+ (item.value.guesttype == 'master') | ternary('node-config-master',
+ 'node-config-compute') }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['compute', 'master']"
+
+# The following tasks update the config file which is required for
+# automated tests from the 'glusterfs-containers-tests' repo
+
+- name: Gather data about existing master nodes for tests config file
+ set_fact:
+ ocp_master_and_client_nodes: "{{
+ ocp_master_and_client_nodes | default({}) | combine({
+ (
+ ((
+ (hostvars[item].guest | default({'net': [{
+ 'network': vm_network,
+ 'ipaddress': [
+ ip4addrs[hostvars[item].inventory_hostname_short]
+ ]
+ }]})).net | selectattr('network', 'equalto', vm_network)
+ ) | list)[0].ipaddress | ipv4 | first
+ ): {
+ 'hostname': hostvars[item].inventory_hostname_short,
+ }
+ })
+ }}"
+ with_items: "{{ groups[cluster_id + '-master'] }}"
+ when: cns_automation_config_file_path | length > 0
+
+- name: Gather data about existing compute nodes for tests config file
+ set_fact:
+ ocp_compute_nodes: "{{
+ ocp_compute_nodes | default({}) | combine({
+ (
+ ((
+ (hostvars[item].guest | default({'net': [{
+ 'network': vm_network,
+ 'ipaddress': [
+ ip4addrs[hostvars[item].inventory_hostname_short]
+ ]
+ }]})).net | selectattr('network', 'equalto', vm_network)
+ ) | list)[0].ipaddress | ipv4 | first
+ ): {
+ 'hostname': hostvars[item].inventory_hostname_short,
+ }
+ })
+ }}"
+ with_items: "{{ groups[cluster_id + '-compute'] | default([]) }}"
+ when: cns_automation_config_file_path | length > 0
+
+- name: Update 'glusterfs-containers-tests' config file
+ yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: ocp_servers
+ value:
+ master: "{{ ocp_master_and_client_nodes }}"
+ client: "{{ ocp_master_and_client_nodes }}"
+ nodes: "{{ ocp_compute_nodes }}"
+ - key: openshift.heketi_config.heketi_client_node
+ value: "{{ ocp_master_and_client_nodes.keys()[0] }}"
+ - key: openshift.heketi_config.heketi_server_url
+ value: "http://{{ ocp_master_and_client_nodes.keys()[0] }}:8080"
+ when:
+ - ocp_master_and_client_nodes is defined
+ - ocp_compute_nodes is defined
diff --git a/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml b/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
new file mode 100644
index 00000000..dfe5e649
--- /dev/null
+++ b/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
@@ -0,0 +1,66 @@
+---
+- name: Clear yum cache
+ command: "yum clean all"
+ ignore_errors: true
+
+- name: Install required common rpms
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - 'iptables'
+ - 'iptables-services'
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+
+- name: Enable Gluster 3 repo
+ import_role:
+ name: enable-gluster-repo
+
+- name: Install required Gluster 3 rpms
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - 'redhat-storage-server'
+ - 'heketi-client'
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+
+- name: Install gluster-block package
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - 'gluster-block'
+ retries: 5
+ delay: 5
+ ignore_errors: yes
+
+- name: Stop firewalld
+ service:
+ name: firewalld
+ state: stopped
+ enabled: no
+
+- name: Start Glusterd and iptables
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: true
+ with_items:
+ - iptables
+ - glusterd
+
+- name: Start gluster-blockd service
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: true
+ with_items:
+ - gluster-blockd
+ ignore_errors: yes
diff --git a/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml b/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
new file mode 100644
index 00000000..062f543a
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+docker_dev: "/dev/sdb"
+docker_vg: "docker-vol"
+docker_data_size: "95%VG"
+docker_dm_basesize: "3G"
+container_root_lv_name: "dockerlv"
+container_root_lv_mount_path: "/var/lib/docker"
diff --git a/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml b/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
new file mode 100644
index 00000000..d8fd457e
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+- name: remove any existing docker-storage config file
+ file:
+ path: /etc/sysconfig/docker-storage
+ state: absent
+
+- block:
+ - name: create the docker-storage config file
+ template:
+ src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2"
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0644
+
+ when:
+ - ansible_distribution_version | version_compare('7.4', '>=')
+ - ansible_distribution == "RedHat"
+
+- block:
+ - name: create the docker-storage-setup config file
+ template:
+ src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0644
+
+ when:
+ - ansible_distribution_version | version_compare('7.4', '<')
+ - ansible_distribution == "RedHat"
+
+- name: start docker
+ service: name=docker state=started enabled=true
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
new file mode 100644
index 00000000..b5869fef
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
@@ -0,0 +1,4 @@
+DEVS="{{ docker_dev }}"
+VG="{{ docker_vg }}"
+DATA_SIZE="{{ docker_data_size }}"
+EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
new file mode 100644
index 00000000..61ba30af
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
@@ -0,0 +1,7 @@
+DEVS="{{ docker_dev }}"
+VG="{{ docker_vg }}"
+DATA_SIZE="{{ docker_data_size }}"
+STORAGE_DRIVER=overlay2
+CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
+CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
+CONTAINER_ROOT_LV_SIZE=100%FREE
\ No newline at end of file
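With the role defaults from defaults/main.yaml above, this template renders /etc/sysconfig/docker-storage-setup as:

    DEVS="/dev/sdb"
    VG="docker-vol"
    DATA_SIZE="95%VG"
    STORAGE_DRIVER=overlay2
    CONTAINER_ROOT_LV_NAME="dockerlv"
    CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/docker"
    CONTAINER_ROOT_LV_SIZE=100%FREE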
diff --git a/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml b/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
new file mode 100644
index 00000000..7236d77d
--- /dev/null
+++ b/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
@@ -0,0 +1,15 @@
+---
+- name: Enable main Gluster 3 repo with GA packages
+ command: "subscription-manager repos --enable=rh-gluster-3-for-rhel-7-server-rpms"
+# when: gluster_puddle_repo == ''
+
+- name: Create additional repo with downstream packages for Gluster 3
+ yum_repository:
+ name: "downstream-rh-gluster-3-for-rhel-7-server-rpms"
+ baseurl: "{{ gluster_puddle_repo }}"
+ description: "Downstream repo with development versions of packages for Gluster 3"
+ enabled: "yes"
+ gpgcheck: "no"
+ sslverify: "no"
+ cost: 990
+ when: gluster_puddle_repo != ''
diff --git a/deployment/playbooks/roles/etcd-storage/tasks/main.yaml b/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
new file mode 100644
index 00000000..fe13dc17
--- /dev/null
+++ b/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
@@ -0,0 +1,24 @@
+---
+- name: Create openshift volume group
+ lvg: vg=etcd_vg pvs=/dev/sdd
+
+- name: Create lvm volumes
+ lvol: vg=etcd_vg lv=etcd_lv size=95%FREE state=present shrink=no
+
+- name: Create local partition on lvm lv
+ filesystem:
+ fstype: xfs
+ dev: /dev/etcd_vg/etcd_lv
+
+- name: Create the /var/lib/etcd mount point directory
+ file: path=/var/lib/etcd state=directory mode=0755
+
+- name: Mount the partition
+ mount:
+ name: /var/lib/etcd
+ src: /dev/etcd_vg/etcd_lv
+ fstype: xfs
+ state: present
+
+- name: Remount new partition
+ command: "mount -a"
diff --git a/deployment/playbooks/roles/gluster-ports/defaults/main.yaml b/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
new file mode 100644
index 00000000..fadcb096
--- /dev/null
+++ b/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+gluster_ports: ['24007', '24008', '2222', '49152:49664', '24010', '3260', '111']
+crs_ports: ['8080']
diff --git a/deployment/playbooks/roles/gluster-ports/tasks/main.yaml b/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
new file mode 100644
index 00000000..a3f0565b
--- /dev/null
+++ b/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+- name: open gluster ports
+ iptables:
+ chain: INPUT
+ destination_port: "{{ item }}"
+ jump: ACCEPT
+ ctstate: NEW
+ protocol: tcp
+ action: insert
+ match: tcp
+ with_items: "{{ gluster_ports }}"
+ when: groups['storage'] is defined and groups['storage'] != []
+ register: rule
+
+- name: save iptables
+ shell: iptables-save > /etc/sysconfig/iptables
+ when: rule|changed
+
+- name: open crs (heketi) ports
+ iptables:
+ chain: INPUT
+ destination_port: "{{ item }}"
+ ctstate: NEW
+ jump: ACCEPT
+ protocol: tcp
+ action: insert
+ match: tcp
+ with_items: "{{ crs_ports }}"
+ when: groups['crs'] is defined and groups['crs'] != []
+ register: heketi
+
+- name: save iptables
+ shell: iptables-save > /etc/sysconfig/iptables
+ when: heketi|changed
diff --git a/deployment/playbooks/roles/instance-groups/tasks/main.yaml b/deployment/playbooks/roles/instance-groups/tasks/main.yaml
new file mode 100644
index 00000000..f8da4217
--- /dev/null
+++ b/deployment/playbooks/roles/instance-groups/tasks/main.yaml
@@ -0,0 +1,155 @@
+---
+# Create rhsm_user, rhsm_password, rhsm_pool and rhsm_server facts from the
+# deprecated rhel_subscription_* variables for backward compatibility
+- name: Set deprecated fact for rhel_subscription_user
+ set_fact:
+ rhsm_user: "{{ rhel_subscription_user }}"
+ when: rhel_subscription_user is defined
+
+- name: Set deprecated fact for rhel_subscription_pass
+ set_fact:
+ rhsm_password: "{{ rhel_subscription_pass }}"
+ when: rhel_subscription_pass is defined
+
+- name: Set deprecated fact for rhel_subscription_pool
+ set_fact:
+ rhsm_pool: "{{ rhel_subscription_pool }}"
+ when: rhel_subscription_pool is defined
+
+- name: Add masters to requisite groups
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, masters, etcd, nodes, cluster_hosts, master
+ openshift_node_group_name: "node-config-master"
+ with_items: "{{ groups[cluster_id + '-master'] }}"
+ when:
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add masters to requisite groups
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, masters, etcd, nodes, cluster_hosts, master
+ openshift_node_group_name: "node-config-master"
+ openshift_node_labels:
+ role: master
+ node-role.kubernetes.io/master: true
+ with_items: "{{ groups[cluster_id + '-master'] }}"
+ when:
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add a master to the single master group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: single_master
+ openshift_node_group_name: "node-config-master"
+ with_items: "{{ groups[cluster_id + '-master'][0] }}"
+ when:
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add a master to the single master group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: single_master
+ openshift_node_group_name: "node-config-master"
+ openshift_node_labels:
+ role: master
+ node-role.kubernetes.io/master: true
+ with_items: "{{ groups[cluster_id + '-master'][0] }}"
+ when:
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add compute instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute
+ openshift_node_group_name: "node-config-compute"
+ with_items: "{{ groups[cluster_id + '-compute'] }}"
+ when:
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add compute instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute
+ openshift_node_group_name: "node-config-compute"
+ openshift_node_labels:
+ role: compute
+ node-role.kubernetes.io/compute: true
+ node-role.kubernetes.io/infra: true
+ with_items: "{{ groups[cluster_id + '-compute'] }}"
+ when:
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add new node instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, new_nodes
+ openshift_node_group_name: "node-config-compute"
+ with_items: "{{ groups.tag_provision_node | default([]) }}"
+ when:
+ - add_node is defined
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add new node instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, new_nodes
+ openshift_node_group_name: "node-config-compute"
+ openshift_node_labels:
+ role: "{{ node_type }}"
+ node-role.kubernetes.io/compute: true
+ node-role.kubernetes.io/infra: true
+ with_items: "{{ groups.tag_provision_node | default([]) }}"
+ when:
+ - add_node is defined
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add cns instances to allnodes
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-storage'] | default([]) }}"
+
+- name: Add crs instances to allnodes
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-crs'] | default([]) }}"
+
+- name: Add cns instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: nodes, cluster_hosts, schedulable_nodes, storage
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-storage'] }}"
+ when:
+ - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add cns instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: nodes, cluster_hosts, schedulable_nodes, storage
+ openshift_node_labels:
+ role: storage
+ node-role.kubernetes.io/storage: true
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-storage'] }}"
+ when:
+ - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add crs nodes to the storage group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: storage, crs
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-crs'] }}"
+ when:
+ - "'crs' in container_storage and add_node is defined and 'storage' in node_type"
+
+- name: Add a crs node to the single crs group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: single_crs
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-crs'][0] }}"
+ when:
+ - "'crs' in container_storage and add_node is defined and 'storage' in node_type"
diff --git a/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml b/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
new file mode 100644
index 00000000..de9230d1
--- /dev/null
+++ b/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Install git
+ package:
+ name: git
+ state: latest
+ when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
diff --git a/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
new file mode 100644
index 00000000..cd74c20e
--- /dev/null
+++ b/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+local_volumes_device: "/dev/sdc"
+local_volumes_fstype: "xfs"
+local_volumes_fsopts: "gquota"
+local_volumes_path: "/var/lib/origin/openshift.local.volumes"
diff --git a/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
new file mode 100644
index 00000000..df58fe80
--- /dev/null
+++ b/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create filesystem for /var/lib/origin/openshift.local.volumes
+ filesystem:
+ fstype: "{{ local_volumes_fstype }}"
+ dev: "{{ local_volumes_device }}"
+
+- name: Create local volumes directory
+ file:
+ path: "{{ local_volumes_path }}"
+ state: directory
+ recurse: yes
+
+- name: Create fstab entry
+ mount:
+ name: "{{ local_volumes_path }}"
+ src: "{{ local_volumes_device }}"
+ fstype: "{{ local_volumes_fstype }}"
+ opts: "{{ local_volumes_fsopts }}"
+ state: present
+
+- name: Mount fstab entry
+ mount:
+ name: "{{ local_volumes_path }}"
+ src: "{{ local_volumes_device }}"
+ fstype: "{{ local_volumes_fstype }}"
+ opts: "{{ local_volumes_fsopts }}"
+ state: mounted
diff --git a/deployment/playbooks/roles/package-repos/tasks/main.yaml b/deployment/playbooks/roles/package-repos/tasks/main.yaml
new file mode 100644
index 00000000..3492a9e4
--- /dev/null
+++ b/deployment/playbooks/roles/package-repos/tasks/main.yaml
@@ -0,0 +1,23 @@
+---
+- name: Import RHSM role
+ import_role:
+ name: rhsm
+
+- name: Evaluate OCP repo name
+ set_fact:
+ tmp_ose_repo_name: "rhel-7-server-ose-3.{{ openshift_vers.split('_')[-1] }}-rpms"
+
+- name: Disable OpenShift 3.X GA repo
+ command: "subscription-manager repos --disable={{ tmp_ose_repo_name }}"
+ when: (ose_puddle_repo != '') or ('crs' in group_names)
+
+- name: Create additional repo with downstream packages for OpenShift 3.X
+ yum_repository:
+ name: "downstream-{{ tmp_ose_repo_name }}"
+ baseurl: "{{ ose_puddle_repo }}"
+ description: "Downstream repo with development versions of packages for OpenShift"
+ enabled: "{{ (ose_puddle_repo != '') | ternary('yes', 'no') }}"
+ gpgcheck: "no"
+ sslverify: "no"
+ cost: 900
+ when: (ose_puddle_repo != '') and ('crs' not in group_names)
diff --git a/deployment/playbooks/roles/prerequisites/defaults/main.yaml b/deployment/playbooks/roles/prerequisites/defaults/main.yaml
new file mode 100644
index 00000000..1705ee4f
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/defaults/main.yaml
@@ -0,0 +1,6 @@
+---
+openshift_required_packages:
+- iptables
+- iptables-services
+- NetworkManager
+- docker{{ '-' + docker_version if docker_version is defined else '' }}
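The last list entry pins the docker package version only when docker_version is defined; e.g. with a hypothetical docker_version of '1.13.1' the item renders as:

    docker-1.13.1    # docker_version defined
    docker           # docker_version undefined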
diff --git a/deployment/playbooks/roles/prerequisites/library/openshift_facts.py b/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
new file mode 120000
index 00000000..e0061bb7
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
@@ -0,0 +1 @@
+/usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
\ No newline at end of file
diff --git a/deployment/playbooks/roles/prerequisites/library/rpm_q.py b/deployment/playbooks/roles/prerequisites/library/rpm_q.py
new file mode 100644
index 00000000..3dec50fc
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/library/rpm_q.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tobias Florek <tob@butter.sh>
+# Licensed under the terms of the MIT License
+"""
+An Ansible module to query the RPM database, for use when yum/dnf are not
+available.
+"""
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import * # noqa: F403
+
+DOCUMENTATION = """
+---
+module: rpm_q
+short_description: Query the RPM database
+author: Tobias Florek
+options:
+ name:
+ description:
+ - The name of the package to query
+ required: true
+ state:
+ description:
+ - Whether the package is supposed to be installed or not
+ choices: [present, absent]
+ default: present
+"""
+
+EXAMPLES = """
+- rpm_q: name=ansible state=present
+- rpm_q: name=ansible state=absent
+"""
+
+RPM_BINARY = '/bin/rpm'
+
+
+def main():
+ """
+ Checks rpm -q for the named package and returns the installed packages
+ or None if not installed.
+ """
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # pylint: disable=invalid-name
+ rc, out, err = module.run_command([RPM_BINARY, '-q', name])
+
+ installed = out.rstrip('\n').split('\n')
+
+ if rc != 0:
+ if state == 'present':
+ module.fail_json(msg="%s is not installed" % name, stdout=out, stderr=err, rc=rc)
+ else:
+ module.exit_json(changed=False)
+ elif state == 'present':
+ module.exit_json(changed=False, installed_versions=installed)
+ else:
+ module.fail_json(msg="%s is installed", installed_versions=installed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/deployment/playbooks/roles/prerequisites/tasks/main.yaml b/deployment/playbooks/roles/prerequisites/tasks/main.yaml
new file mode 100644
index 00000000..a2686796
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/tasks/main.yaml
@@ -0,0 +1,84 @@
+---
+- name: Gather facts
+ openshift_facts:
+ role: common
+
+- block:
+ - name: Clear yum cache
+ command: "yum clean all"
+ ignore_errors: true
+
+ - name: Install the required rpms
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items: "{{ openshift_required_packages }}"
+
+ - name: Start NetworkManager and network
+ service:
+ name: "{{ item }}"
+ state: restarted
+ enabled: true
+ with_items:
+ - NetworkManager
+ - network
+
+ - name: Determine if firewalld is installed
+ rpm_q:
+ name: "firewalld"
+ state: present
+ register: firewalld_installed
+ failed_when: false
+
+ - name: Stop firewalld
+ service:
+ name: firewalld
+ state: stopped
+ enabled: false
+ when:
+ - "{{ firewalld_installed.installed_versions | default([]) | length > 0 }}"
+
+ - name: Start iptables
+ service:
+ name: iptables
+ state: started
+ enabled: true
+
+ - name: Start docker
+ service:
+ name: docker
+ state: started
+ enabled: true
+
+ when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
+
+# Fail as early as possible if Atomic and old version of Docker
+- block:
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - assert:
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Attempting to patch.
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+
+ rescue:
+ - name: Patching Atomic instances
+ shell: atomic host upgrade
+ register: patched
+
+ - name: Reboot when patched
+ shell: sleep 5 && shutdown -r now "Reboot due to Atomic Patching"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ when: patched.changed
+
+ - name: Wait for hosts to be back
+ pause:
+ seconds: 60
+ delegate_to: 127.0.0.1
+ when: patched.changed
+
+ when: (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
diff --git a/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml b/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
new file mode 100644
index 00000000..9b9f3b21
--- /dev/null
+++ b/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- block:
+ - name: Is the host already registered?
+ command: "subscription-manager list"
+ register: subscribed
+ ignore_errors: yes
+
+ - name: Unregister host
+ redhat_subscription:
+ state: absent
+ when: "'Subscribed' in subscribed.stdout"
+ ignore_errors: yes
+
+ when: ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/rhsm/defaults/main.yaml b/deployment/playbooks/roles/rhsm/defaults/main.yaml
new file mode 100644
index 00000000..3207411f
--- /dev/null
+++ b/deployment/playbooks/roles/rhsm/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+openshift_required_repos:
+- 'rhel-7-server-rpms'
+- 'rhel-7-server-extras-rpms'
+- 'rhel-7-fast-datapath-rpms'
diff --git a/deployment/playbooks/roles/rhsm/tasks/main.yaml b/deployment/playbooks/roles/rhsm/tasks/main.yaml
new file mode 100644
index 00000000..f793fb2f
--- /dev/null
+++ b/deployment/playbooks/roles/rhsm/tasks/main.yaml
@@ -0,0 +1,49 @@
+---
+- block:
+ - name: Allow rhsm a longer timeout to help out with subscription-manager
+ lineinfile:
+ dest: /etc/rhsm/rhsm.conf
+ line: 'server_timeout=600'
+ insertafter: '^proxy_password ='
+
+ - name: Is the system already registered?
+ command: "subscription-manager version"
+ register: subscribed
+
+ - name: Unregister system if registered
+ import_role:
+ name: rhsm-unregister
+ when:
+ - "'not registered' not in subscribed.stdout"
+
+ - name: Register system using Red Hat Subscription Manager
+ redhat_subscription:
+ state: present
+ username: "{{ rhsm_user | default(omit) }}"
+ password: "{{ rhsm_password | default(omit) }}"
+ pool: "{{ rhsm_pool | default(omit) }}"
+ server_hostname: "{{ rhsm_satellite | default(omit) }}"
+ when:
+ - "'not registered' in subscribed.stdout"
+ - rhsm_user is defined
+ - rhsm_user|trim != ''
+ register: rhn
+ until: rhn|success
+ retries: 5
+
+ - name: Obtain currently enabled repos
+ shell: 'subscription-manager repos --list-enabled | sed -ne "s/^Repo ID:[^a-zA-Z0-9]*\(.*\)/\1/p"'
+ register: enabled_repos
+
+ - name: Disable repositories that should not be enabled
+ shell: "subscription-manager repos --disable={{ item }}"
+ with_items:
+ - "{{ enabled_repos.stdout_lines | difference(openshift_required_repos) }}"
+ when: provider is not defined
+
+ - name: Enable specified repositories not already enabled
+ command: "subscription-manager repos --enable={{ item }}"
+ with_items:
+ - "{{ openshift_required_repos | difference(enabled_repos.stdout_lines) }}"
+
+ when: ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
new file mode 100644
index 00000000..e9e06809
--- /dev/null
+++ b/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
@@ -0,0 +1,83 @@
+---
+# NOTE(vponomar): two different sources of IP addresses are used here:
+# 1) hostvars[item].guest.net exists for old nodes that were not created by
+# this playbook run. Such nodes have detailed info in hostvars.
+# 2) hostvars[item].ansible_ssh_host is always the correct IP address for
+# newly created nodes. We fall back to it when variant 1 does not work.
+- name: Save matched hosts to temporary var
+ set_fact:
+ current_cluster_hosts: "{{
+ current_cluster_hosts | default([]) | union([{
+ 'name_short': hostvars[item].inventory_hostname_short,
+ 'name': hostvars[item].inventory_hostname,
+ 'net': (hostvars[item].guest | default({})).net | default(
+ [{'network': vm_network,
+ 'ipaddress': [hostvars[item].ansible_ssh_host]}])
+ }]) }}"
+ with_items: "{{ groups.all | select('match', ocp_hostname_prefix) | list }}"
+
+- name: Gather current cluster IP addresses
+ set_fact:
+ current_cluster_ips: "{{
+ current_cluster_ips | default({}) | combine({
+ (item.1.ipaddress | ipv4 | first): [item.0.name_short, item.0.name]
+ }) }}"
+ with_subelements: ["{{ current_cluster_hosts }}", net]
+ when: "item.1.network == vm_network"
+
+- name: Get current user home dir
+ shell: 'eval echo "~$USER"'
+ register: home_dir
+- name: Set hosts files paths
+ set_fact:
+ home_hosts_file: "{{ home_dir.stdout_lines[0] + '/.ssh/config' }}"
+ system_hosts_file: "/etc/hosts"
+- name: Check 'write' permissions for system hosts file
+ stat:
+ path: "{{ system_hosts_file }}"
+ register: stat_system_hosts
+
+- name: Update system hosts file if writeable
+ block:
+ - name: Delete old left-overs if exist
+ lineinfile:
+ dest: "{{ system_hosts_file }}"
+ regexp: '{{ item.name_short }}'
+ state: absent
+ create: true
+ with_items: "{{ current_cluster_hosts }}"
+ - name: Add domain name mapping of new cluster nodes to the system hosts file
+ lineinfile:
+ dest: "{{ system_hosts_file }}"
+ line: '{{ item.key }} {{ item.value.0 }} {{ item.value.1 }}'
+ create: true
+ with_dict: "{{ current_cluster_ips }}"
+ when: "stat_system_hosts.stat.writeable"
+
+- name: Update user's SSH hosts file
+ block:
+ - name: Delete old left-overs if exist
+ lineinfile:
+ path: "{{ home_hosts_file }}"
+ state: absent
+ regexp: "{{ item.key }}"
+ create: true
+ mode: '644'
+ with_dict: "{{ current_cluster_ips }}"
+ - name: Write line with option group
+ lineinfile:
+ dest: "{{ home_hosts_file }}"
+ state: present
+ line: "Host {{ item.value.0 }} {{ item.value.1 }}"
+ create: true
+ mode: '644'
+ with_dict: "{{ current_cluster_ips }}"
+ - name: Write line with hostname option
+ lineinfile:
+ dest: "{{ home_hosts_file }}"
+ state: present
+ line: " HostName {{ item.key }}"
+ insertafter: "Host {{ item.value.0 }} {{ item.value.1 }}"
+ create: true
+ mode: '644'
+ with_dict: "{{ current_cluster_ips }}"
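The three lineinfile tasks above leave one Host/HostName pair per cluster node in the runner's ~/.ssh/config. For a hypothetical node 'master-0' with FQDN 'master-0.example.com' at 192.168.1.10, the appended snippet would be:

    Host master-0 master-0.example.com
     HostName 192.168.1.10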
diff --git a/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
new file mode 100644
index 00000000..d53fa43f
--- /dev/null
+++ b/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
@@ -0,0 +1,29 @@
+---
+- name: Import role with update of /etc/hosts file
+ import_role:
+ name: setup-custom-domain-names-for-ansible-runner
+
+- name: Create directory for dnsmasq config file if absent
+ file:
+ dest: /etc/dnsmasq.d
+ state: directory
+ mode: 0755
+
+- name: Create custom dnsmasq config file for current cluster
+ file:
+ dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
+ state: touch
+
+- name: Remove stale data from custom dnsmasq config file if it exists
+ lineinfile:
+ dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
+ regexp: "{{ item.value.0 }}"
+ state: absent
+ with_dict: "{{ current_cluster_ips }}"
+
+- name: Write data to custom dnsmasq config file
+ lineinfile:
+ dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
+ line: "address=/{{ item.value.0 }}/{{ item.key }}\naddress=/{{ item.value.1 }}/{{ item.key }}"
+ state: present
+ with_dict: "{{ current_cluster_ips }}"
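Continuing the same hypothetical node, the per-cluster file /etc/dnsmasq.d/openshift-cluster-ocp.conf (for cluster_id 'ocp') ends up holding an address pair per host:

    address=/master-0/192.168.1.10
    address=/master-0.example.com/192.168.1.10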
diff --git a/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml b/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
new file mode 100644
index 00000000..d42484e0
--- /dev/null
+++ b/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
@@ -0,0 +1,22 @@
+---
+- name: Copy cloud provider storage class file
+ template:
+ src: cloud-provider-storage-class.yaml.j2
+ dest: ~/cloud-provider-storage-class.yaml
+
+- name: Copy cloud provider storage class file to single master
+ fetch:
+ src: ~/cloud-provider-storage-class.yaml
+ dest: ~/cloud-provider-storage-class.yaml
+ flat: yes
+
+- name: Switch to default project
+ command: oc project default
+
+- name: Check to see if storage class is already created
+ command: "oc get storageclass"
+ register: storage_class
+
+- name: Create storage class
+ command: "oc create -f ~/cloud-provider-storage-class.yaml"
+ when: "'{{ vcenter_datastore }}' not in storage_class.stdout"
diff --git a/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2 b/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
new file mode 100644
index 00000000..e31d53a4
--- /dev/null
+++ b/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
@@ -0,0 +1,8 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: "{{ vcenter_datastore }}"
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+ diskformat: zeroedthick
+ datastore: "{{ vcenter_datastore }}"
diff --git a/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
new file mode 100644
index 00000000..67898e0c
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: restart chronyd
+ service: name=chronyd state=restarted
+
+- name: restart networking
+ service: name=networking state=restarted
diff --git a/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
new file mode 100644
index 00000000..39dea695
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
@@ -0,0 +1,77 @@
+---
+- name: Determine if Atomic
+ stat: path=/run/ostree-booted
+ register: s
+ changed_when: false
+ check_mode: no
+
+- name: Init the is_atomic fact
+ set_fact:
+ is_atomic: false
+
+- name: Set the is_atomic fact
+ set_fact:
+ is_atomic: true
+ when: s.stat.exists
+
+- block:
+ - name: be sure all pre-req packages are installed
+ yum: name={{item}} state=installed
+ with_items:
+ - open-vm-tools
+ - PyYAML
+ - perl
+ - python-ipaddress
+ - net-tools
+ - chrony
+ - python-six
+ - iptables
+ - iptables-services
+ - docker{{ '-' + docker_version if docker_version is defined else '' }}
+ - dnsmasq
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+
+ - name: be sure openvmtools is running and enabled
+ service: name=vmtoolsd state=started enabled=yes
+
+ when:
+ - not is_atomic | bool
+ - ansible_distribution == "RedHat"
+
+- name: be sure chrony is configured
+ template: src=chrony.conf.j2 dest=/etc/chrony.conf
+ notify:
+ - restart chronyd
+
+- name: set link to localtime
+ command: timedatectl set-timezone {{timezone}}
+
+- name: be sure chronyd is running and enabled
+ service: name=chronyd state=started enabled=yes
+
+- block:
+ - name: (Atomic) Remove extra docker lv from root vg
+ lvol:
+ lv: docker-pool
+ vg: atomicos
+ state: absent
+ force: yes
+ - name: (Atomic) Grow root lv to fill vg
+ lvol:
+ lv: root
+ vg: atomicos
+ size: +100%FREE
+ - name: (Atomic) Grow root fs to match lv
+ filesystem:
+ dev: /dev/mapper/atomicos-root
+ fstype: xfs
+ resizefs: yes
+ - name: (Atomic) Force Ansible to re-gather disk facts
+ setup:
+ filter: 'ansible_mounts'
+ when:
+ - is_atomic | bool
+ - ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2 b/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
new file mode 100644
index 00000000..b8020cb0
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
@@ -0,0 +1,19 @@
+# This file is managed by Ansible
+
+server 0.rhel.pool.ntp.org
+server 1.rhel.pool.ntp.org
+server 2.rhel.pool.ntp.org
+server 3.rhel.pool.ntp.org
+
+driftfile /var/lib/chrony/drift
+makestep 10 3
+
+keyfile /etc/chrony.keys
+commandkey 1
+generatecommandkey
+
+noclientlog
+logchange 0.5
+
+logdir /var/log/chrony
+log measurements statistics tracking
diff --git a/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
new file mode 100644
index 00000000..a951d622
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
@@ -0,0 +1,3 @@
+---
+locale: en_US.UTF-8
+timezone: UTC
diff --git a/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml b/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
new file mode 100644
index 00000000..d53f5bd2
--- /dev/null
+++ b/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
@@ -0,0 +1,44 @@
+# NOTE(vponomar): this role should not be run from nodes
+# which are going to be rebooted.
+---
+
+- block:
+ - name: Check that hostnames_for_reboot var is set and is not an empty list
+ fail:
+ msg: "Role 'yum-update-and-reboot' expects 'hostnames_for_reboot' var
+ to be set as a list of hostnames which should be rebooted."
+ when: "(hostnames_for_reboot is not defined) or hostnames_for_reboot | length < 1"
+
+ - name: Run yum_update command
+ command: "yum update -y"
+ delegate_to: "{{ item }}"
+ with_items: "{{ hostnames_for_reboot }}"
+
+ - name: Reboot machine to apply all major changes to the system, if any
+ shell: "sleep 3 ; /sbin/shutdown -r now 'Reboot triggered by Ansible'"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ delegate_to: "{{ item }}"
+ with_items: "{{ hostnames_for_reboot }}"
+
+ - name: Wait for machine to go down
+ wait_for:
+ host: "{{ item }}"
+ port: 22
+ delay: 0
+ timeout: 180
+ connect_timeout: 5
+ state: stopped
+ with_items: "{{ hostnames_for_reboot }}"
+
+ - name: Wait for machine to go up
+ wait_for:
+ host: "{{ item }}"
+ port: 22
+ delay: 0
+ timeout: 180
+ connect_timeout: 5
+ state: started
+ with_items: "{{ hostnames_for_reboot }}"
+ when: "disable_yum_update_and_reboot is undefined or not (disable_yum_update_and_reboot | bool)"