author     Valerii Ponomarov <vponomar@redhat.com>  2019-02-07 02:08:23 +0530
committer  Valerii Ponomarov <vponomar@redhat.com>  2019-02-07 02:36:02 +0530
commit     25fcd9c5aa4c360eff19ef08fc4e2bdff6147ffd (patch)
tree       544cf09479861ee7c434a7f9ece19167c14ddf35 /deployment/playbooks
parent     a6c7dead0d6ddad4dae93a4292891617b50b44a0 (diff)
Add end-to-end OCP 'deployment' functionality
Add an end-to-end deployment tool for OpenShift and OpenShift Container Storage on top of VMware.
The added code is a modified version of the 'reference-architecture/vmware-ansible' dir from the following repo: https://github.com/vponomaryov/openshift-ansible-contrib
Read the 'deployment/README.rst' file for more details about the deployment tool.
Change-Id: Ic96f252ff786cc1ecf24d27f0ec47e324131e41b
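For reference, a hypothetical invocation of the main entry-point playbook added here could look like the sketch below. It assumes an already-prepared inventory file (the name 'hosts' is an illustration only) and a virtualenv set up as 'deployment/README.rst' describes; only the 'ocp-end-to-end.yaml' entry point and its 'setup'/'prod'/'ocp-install'/'ocp-configure'/'clean' tags come from the playbooks in this change:

# Deploy OCP end to end (skipping the cleanup stage); inventory file name is an assumption:
$ ansible-playbook -i hosts deployment/playbooks/ocp-end-to-end.yaml --tags setup,prod,ocp-install,ocp-configure
# Tear the deployed VMs back down:
$ ansible-playbook -i hosts deployment/playbooks/ocp-end-to-end.yaml --tags clean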
Diffstat (limited to 'deployment/playbooks')
-rw-r--r--  deployment/playbooks/add-node-prerequisite.yaml | 16
-rw-r--r--  deployment/playbooks/add-node.yaml | 87
-rw-r--r--  deployment/playbooks/clean.yaml | 66
-rw-r--r--  deployment/playbooks/cleanup-cns.yaml | 38
-rw-r--r--  deployment/playbooks/cleanup-crs.yaml | 38
-rw-r--r--  deployment/playbooks/cns-node-setup.yaml | 76
-rw-r--r--  deployment/playbooks/cns-setup.yaml | 121
-rw-r--r--  deployment/playbooks/cns-storage.yaml | 15
-rw-r--r--  deployment/playbooks/crs-node-setup.yaml | 68
-rw-r--r--  deployment/playbooks/crs-setup.yaml | 163
-rw-r--r--  deployment/playbooks/crs-storage.yaml | 12
-rw-r--r--  deployment/playbooks/get_ocp_info.yaml | 231
l---------  deployment/playbooks/library/rpm_q.py | 1
-rw-r--r--  deployment/playbooks/library/vmware_folder.py | 253
-rw-r--r--  deployment/playbooks/library/vmware_resource_pool.py | 330
-rw-r--r--  deployment/playbooks/node-setup.yaml | 47
-rw-r--r--  deployment/playbooks/noop.yaml | 7
-rw-r--r--  deployment/playbooks/ocp-configure.yaml | 33
-rw-r--r--  deployment/playbooks/ocp-end-to-end.yaml | 15
-rw-r--r--  deployment/playbooks/ocp-install.yaml | 138
-rw-r--r--  deployment/playbooks/prerequisite.yaml | 26
-rw-r--r--  deployment/playbooks/prod-ose-cns.yaml | 11
-rw-r--r--  deployment/playbooks/prod-ose-crs.yaml | 11
-rw-r--r--  deployment/playbooks/prod.yaml | 20
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml | 13
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2 | 11
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml | 3
-rw-r--r--  deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml | 8
-rw-r--r--  deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml | 142
-rw-r--r--  deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml | 143
-rw-r--r--  deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml | 157
-rw-r--r--  deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml | 66
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml | 7
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml | 34
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 | 4
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 | 7
-rw-r--r--  deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml | 15
-rw-r--r--  deployment/playbooks/roles/etcd-storage/tasks/main.yaml | 24
-rw-r--r--  deployment/playbooks/roles/gluster-ports/defaults/main.yaml | 3
-rw-r--r--  deployment/playbooks/roles/gluster-ports/tasks/main.yaml | 34
-rw-r--r--  deployment/playbooks/roles/instance-groups/tasks/main.yaml | 155
-rw-r--r--  deployment/playbooks/roles/master-prerequisites/tasks/main.yaml | 6
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml | 5
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml | 27
-rw-r--r--  deployment/playbooks/roles/package-repos/tasks/main.yaml | 23
-rw-r--r--  deployment/playbooks/roles/prerequisites/defaults/main.yaml | 6
l---------  deployment/playbooks/roles/prerequisites/library/openshift_facts.py | 1
-rw-r--r--  deployment/playbooks/roles/prerequisites/library/rpm_q.py | 72
-rw-r--r--  deployment/playbooks/roles/prerequisites/tasks/main.yaml | 84
-rw-r--r--  deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml | 14
-rw-r--r--  deployment/playbooks/roles/rhsm/defaults/main.yaml | 5
-rw-r--r--  deployment/playbooks/roles/rhsm/tasks/main.yaml | 49
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml | 83
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml | 29
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/tasks/main.yaml | 22
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2 | 8
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml | 6
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml | 77
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2 | 19
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml | 3
-rw-r--r--  deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml | 44
-rw-r--r--  deployment/playbooks/scaleup.yaml | 35
-rw-r--r--  deployment/playbooks/setup.yaml | 27
-rw-r--r--  deployment/playbooks/vars/main.yaml | 31
64 files changed, 3325 insertions, 0 deletions
diff --git a/deployment/playbooks/add-node-prerequisite.yaml b/deployment/playbooks/add-node-prerequisite.yaml
new file mode 100644
index 00000000..f43b3545
--- /dev/null
+++ b/deployment/playbooks/add-node-prerequisite.yaml
@@ -0,0 +1,16 @@
+---
+- hosts: new_nodes
+ gather_facts: yes
+ become: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - package-repos
+
+- hosts: new_nodes
+ gather_facts: no
+ become: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - prerequisites
diff --git a/deployment/playbooks/add-node.yaml b/deployment/playbooks/add-node.yaml
new file mode 100644
index 00000000..8d99a5bd
--- /dev/null
+++ b/deployment/playbooks/add-node.yaml
@@ -0,0 +1,87 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ become: no
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - create-vm-add-prod-ose
+ - setup-custom-domain-names-for-ansible-runner
+
+- hosts: new_nodes
+ gather_facts: yes
+ become: no
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - setup-custom-domain-names
+ - instance-groups
+ - package-repos
+ - vmware-guest-setup
+ - cloud-provider-setup
+ - docker-storage-setup
+ - openshift-volume-quota
+
+# The 'openshift_node_groups' var is required since OCP 3.10
+- hosts: allnodes
+ gather_facts: no
+ become: no
+ tasks:
+ - set_fact:
+ openshift_node_groups:
+ - name: node-config-master
+ labels:
+ - 'node-role.kubernetes.io/master=true'
+ - 'role=master'
+ edits: []
+ - name: node-config-compute
+ labels:
+ - 'node-role.kubernetes.io/compute=true'
+ - 'node-role.kubernetes.io/infra=true'
+ - 'role=compute'
+ edits: []
+ - name: node-config-storage
+ labels:
+ - 'node-role.kubernetes.io/storage=true'
+ - 'role=storage'
+ edits: []
+
+- include: add-node-prerequisite.yaml
+ when: openshift_vers in ['v3_6', 'v3_7']
+
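+# NOTE: the 'when' clause below only skips execution; a playbook-level
+# 'include' path is still resolved at parse time, which is why the ternary
+# falls back to the local 'noop.yaml' placeholder for OCP 3.6/3.7 instead of
+# the openshift-ansible playbook path.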
+- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
+ 'noop.yaml',
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
+ ) }} hosts=new_nodes"
+ when: openshift_vers not in ['v3_6', 'v3_7']
+
+- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
+ 'noop.yaml',
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
+ ) }} hosts=new_nodes"
+ when: openshift_vers not in ['v3_6', 'v3_7']
+
+- name: Map domain names and IP addresses of old and new nodes to each other
+ hosts: master, compute, cns, crs, !new_nodes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - setup-custom-domain-names
+
+- include: node-setup.yaml
+
+- hosts: allnodes
+ gather_facts: no
+ become: no
+ tasks:
+ - name: Make sure dnsmasq is running, enabled and restarted
+ service: name=dnsmasq state=restarted enabled=yes
+
+- hosts: localhost
+ gather_facts: no
+ become: no
+ roles:
+ - yum-update-and-reboot
diff --git a/deployment/playbooks/clean.yaml b/deployment/playbooks/clean.yaml
new file mode 100644
index 00000000..68da95ec
--- /dev/null
+++ b/deployment/playbooks/clean.yaml
@@ -0,0 +1,66 @@
+---
+- hosts: localhost
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - instance-groups
+
+- hosts: allnodes
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - rhsm-unregister
+
+- hosts: localhost
+ user: root
+ become: false
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ tasks:
+ - name: Delete all added VMs
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: "{{ hostvars[item].inventory_hostname }}"
+ datacenter: "{{ vcenter_datacenter }}"
+ cluster: "{{ vcenter_cluster }}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ folder: "/{{ vcenter_datacenter }}/vm/{{ vcenter_folder }}"
+ state: absent
+ force: true
+ with_items: "{{ groups['allnodes'] }}"
+
+ - name: Get current user home dir
+ shell: 'eval echo "~$USER"'
+ register: home_dir
+ - name: Set hosts files paths
+ set_fact:
+ home_hosts_file: "{{ home_dir.stdout_lines[0] + '/.ssh/config' }}"
+ system_hosts_file: "/etc/hosts"
+ - name: Check 'write' permissions for system hosts file
+ stat:
+ path: "{{ system_hosts_file }}"
+ register: stat_system_hosts
+
+ - name: Update system hosts file if writeable
+ lineinfile:
+ dest: "{{ system_hosts_file }}"
+ state: absent
+ regexp: "{{ hostvars[item].inventory_hostname }}"
+ create: true
+ with_items: "{{ groups['allnodes'] }}"
+ when: "stat_system_hosts.stat.writeable"
+ - name: Update user's SSH hosts file
+ lineinfile:
+ dest: "{{ home_hosts_file }}"
+ state: present
+ line: "Host obsolete-{{ item }}"
+ regexp: "Host {{ item }}"
+ create: true
+ mode: '644'
+ with_items: "{{ groups['allnodes'] }}"
diff --git a/deployment/playbooks/cleanup-cns.yaml b/deployment/playbooks/cleanup-cns.yaml
new file mode 100644
index 00000000..5a2d8497
--- /dev/null
+++ b/deployment/playbooks/cleanup-cns.yaml
@@ -0,0 +1,38 @@
+---
+- hosts: localhost
+ user: root
+ become: false
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - instance-groups
+
+- hosts: cns
+ user: root
+ become: false
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - rhsm-unregister
+
+- hosts: localhost
+ user: root
+ become: false
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ tasks:
+ - name: Delete cns VMs
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ vcenter_datacenter }}"
+ folder: "/{{ vcenter_folder }}"
+ name: "{{ item.value.guestname }}"
+ state: absent
+ force: true
+ with_dict: "{{host_inventory}}"
+ when: "'cns' in item.value.guestname"
diff --git a/deployment/playbooks/cleanup-crs.yaml b/deployment/playbooks/cleanup-crs.yaml
new file mode 100644
index 00000000..3d6ee533
--- /dev/null
+++ b/deployment/playbooks/cleanup-crs.yaml
@@ -0,0 +1,38 @@
+---
+- hosts: localhost
+ user: root
+ become: false
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - instance-groups
+
+- hosts: crs
+ user: root
+ become: false
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - rhsm-unregister
+
+- hosts: localhost
+ user: root
+ become: false
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ tasks:
+ - name: Delete crs VMs
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ vcenter_datacenter }}"
+ folder: "/{{ vcenter_folder }}"
+ name: "{{ item.value.guestname }}"
+ state: absent
+ force: true
+ with_dict: "{{host_inventory}}"
+ when: "'crs' in item.value.guestname"
diff --git a/deployment/playbooks/cns-node-setup.yaml b/deployment/playbooks/cns-node-setup.yaml
new file mode 100644
index 00000000..fb699625
--- /dev/null
+++ b/deployment/playbooks/cns-node-setup.yaml
@@ -0,0 +1,76 @@
+---
+- hosts: cns
+ gather_facts: yes
+ become: no
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - setup-custom-domain-names
+ - instance-groups
+ - package-repos
+ - vmware-guest-setup
+ - cloud-provider-setup
+ - docker-storage-setup
+ - openshift-volume-quota
+ - gluster-ports
+
+# The 'openshift_node_groups' var is required since OCP 3.10
+- hosts: allnodes
+ gather_facts: no
+ become: no
+ tasks:
+ - set_fact:
+ openshift_node_groups:
+ - name: node-config-master
+ labels:
+ - 'node-role.kubernetes.io/master=true'
+ - 'role=master'
+ edits: []
+ - name: node-config-compute
+ labels:
+ - 'node-role.kubernetes.io/compute=true'
+ - 'node-role.kubernetes.io/infra=true'
+ - 'role=compute'
+ edits: []
+ - name: node-config-storage
+ labels:
+ - 'node-role.kubernetes.io/storage=true'
+ - 'role=storage'
+ edits: []
+
+- include: add-node-prerequisite.yaml
+ when: openshift_vers in ['v3_6', 'v3_7']
+
+- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
+ 'noop.yaml',
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
+ ) }} hosts=new_nodes"
+ when: openshift_vers not in ['v3_6', 'v3_7']
+
+- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
+ 'noop.yaml',
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
+ ) }} hosts=new_nodes"
+ when: openshift_vers not in ['v3_6', 'v3_7']
+
+- name: Map domain names and IP addresses of old and new nodes to each other
+ hosts: master, compute, crs
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - setup-custom-domain-names
+
+- hosts: allnodes
+ gather_facts: no
+ become: no
+ tasks:
+ - name: Make sure dnsmasq is running, enabled and restarted
+ service: name=dnsmasq state=restarted enabled=yes
+
+- hosts: localhost
+ gather_facts: no
+ become: no
+ roles:
+ - yum-update-and-reboot
diff --git a/deployment/playbooks/cns-setup.yaml b/deployment/playbooks/cns-setup.yaml
new file mode 100644
index 00000000..74e58e8f
--- /dev/null
+++ b/deployment/playbooks/cns-setup.yaml
@@ -0,0 +1,121 @@
+---
+- hosts: cns
+ tasks:
+ - name: Install required kernel modules on CNS nodes
+ import_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+
+- name: Restart dnsmasq to make our custom configs take effect
+ hosts: allnodes
+ tasks:
+ - service:
+ name: dnsmasq
+ state: restarted
+
+- hosts: single_master
+ tasks:
+ - name: Perform actions on master node which are required to install CNS
+ import_role:
+ name: openshift_storage_glusterfs
+ vars:
+ openshift_storage_glusterfs_name: 'storage'
+ openshift_storage_glusterfs_namespace: 'storage'
+ openshift_storage_glusterfs_is_native: true
+ openshift_storage_glusterfs_storageclass: true
+ openshift_storage_glusterfs_block_storageclass: true
+ openshift_storage_glusterfs_s3_deploy: false
+ openshift_storage_glusterfs_heketi_admin_key: "{{
+ (dp_tool_heketi_admin_key.strip() != '') |
+ ternary(dp_tool_heketi_admin_key.strip(), omit) }}"
+ openshift_storage_glusterfs_heketi_user_key: "{{
+ (dp_tool_heketi_user_key.strip() != '') |
+ ternary(dp_tool_heketi_user_key.strip(), omit) }}"
+ openshift_storage_glusterfs_heketi_topology_load: true
+ - name: Allow expansion of PVCs that use the 'glusterfs' storageclass.
+ oc_edit:
+ kind: sc
+ name: glusterfs-{{ glusterfs_name }}
+ content:
+ allowVolumeExpansion: true
+
+- name: Get IP address of the node with router
+ hosts: single_master
+ tasks:
+ - command: "oc get endpoints router -o=custom-columns=:.subsets[*].addresses[0].ip -n default"
+ register: router_get
+ - set_fact:
+ router_ip: "{{ router_get.stdout_lines[1].strip() }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['allnodes'] }}"
+
+- name: Update dnsmasq config with custom domain zone for apps
+ hosts: allnodes
+ tasks:
+ - lineinfile:
+ path: /etc/dnsmasq.conf
+ line: "address=/.{{ app_dns_prefix }}.{{ dns_zone }}/{{ router_ip }}"
+ - service:
+ name: dnsmasq
+ state: restarted
+
+- name: Set External IP address for heketi service
+ hosts: single_master
+ tasks:
+ - command: "python -c \"import yaml ;
+ config = yaml.load(open('/etc/origin/master/master-config.yaml', 'r'));
+ print(config['kubernetesMasterConfig']['masterIP'])
+ \""
+ register: master_ipv4
+ - set_fact:
+ master_ipv4: "{{ master_ipv4.stdout_lines[0] }}"
+ - command: "oc patch svc heketi-storage
+ --namespace storage
+ -p '{\"spec\":{\"externalIPs\":[\"{{ master_ipv4 }}\"]}}'"
+ run_once: true
+
+# The following updates the config file
+# which is required by automated tests from the 'cns-automation' repo
+
+- name: Update 'cns-automation' config file
+ hosts: localhost
+ tasks:
+ - set_fact:
+ master_ipv4: "{{ hostvars[groups['single_master'][0]].master_ipv4 }}"
+ - yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: openshift.storage_project_name
+ value: "storage"
+ - key: openshift.heketi_config.heketi_dc_name
+ value: "heketi-storage"
+ - key: openshift.heketi_config.heketi_service_name
+ value: "heketi-storage"
+ - key: openshift.heketi_config.heketi_client_node
+ value: "{{ master_ipv4 }}"
+ - key: openshift.heketi_config.heketi_server_url
+ value: "http://{{ master_ipv4 }}:8080"
+ - key: openshift.heketi_config.heketi_cli_user
+ value: 'admin'
+ - key: openshift.heketi_config.heketi_cli_key
+ value: "{{ dp_tool_heketi_admin_key }}"
+ - key: openshift.dynamic_provisioning.storage_classes
+ value:
+ file_storage_class:
+ provisioner: "kubernetes.io/glusterfs"
+ resturl: "http://{{ master_ipv4 }}:8080"
+ restuser: "admin"
+ secretnamespace: "storage"
+ volumenameprefix: "autotests-file"
+ block_storage_class:
+ provisioner: "gluster.org/glusterblock"
+ resturl: "http://{{ master_ipv4 }}:8080"
+ restuser: "admin"
+ restsecretnamespace: "storage"
+ volumenameprefix: "autotests-block"
+ hacount: "3"
+ chapauthenabled: "true"
+ when: cns_automation_config_file_path | length > 0
+ run_once: true
diff --git a/deployment/playbooks/cns-storage.yaml b/deployment/playbooks/cns-storage.yaml
new file mode 100644
index 00000000..6df9dbd7
--- /dev/null
+++ b/deployment/playbooks/cns-storage.yaml
@@ -0,0 +1,15 @@
+---
+- include: prod-ose-cns.yaml
+ tags: ['vms']
+
+- include: cns-node-setup.yaml
+ tags: [ 'node-setup']
+
+- include: node-setup.yaml
+ tags: [ 'node-setup']
+
+- include: cns-setup.yaml
+ tags: [ 'node-setup']
+
+- include: cleanup-cns.yaml
+ tags: ['clean']
diff --git a/deployment/playbooks/crs-node-setup.yaml b/deployment/playbooks/crs-node-setup.yaml
new file mode 100644
index 00000000..8dc9eba1
--- /dev/null
+++ b/deployment/playbooks/crs-node-setup.yaml
@@ -0,0 +1,68 @@
+---
+- hosts: crs
+ gather_facts: yes
+ become: no
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - setup-custom-domain-names
+ - instance-groups
+ - package-repos
+ - vmware-guest-setup
+ - crs-prerequisite
+ - gluster-ports
+
+# The 'openshift_node_groups' var is required since OCP 3.10
+- hosts: allnodes
+ gather_facts: no
+ become: no
+ tasks:
+ - set_fact:
+ openshift_node_groups:
+ - name: node-config-master
+ labels:
+ - 'node-role.kubernetes.io/master=true'
+ - 'role=master'
+ edits: []
+ - name: node-config-compute
+ labels:
+ - 'node-role.kubernetes.io/compute=true'
+ - 'node-role.kubernetes.io/infra=true'
+ - 'role=compute'
+ edits: []
+ - name: node-config-storage
+ labels:
+ - 'node-role.kubernetes.io/storage=true'
+ - 'role=storage'
+ edits: []
+
+- hosts: crs
+ gather_facts: no
+ become: no
+ vars_files:
+ - vars/main.yaml
+ tasks:
+ - name: Install required kernel modules on CRS nodes
+ import_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+
+- name: Map domain names and IP addresses of old and new nodes to each other
+ hosts: master, compute, cns
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - setup-custom-domain-names
+
+- hosts: allnodes
+ gather_facts: no
+ become: no
+ tasks:
+ - name: Make sure dnsmasq is running, enabled and restarted
+ service: name=dnsmasq state=restarted enabled=yes
+
+- hosts: localhost
+ gather_facts: no
+ become: no
+ roles:
+ - yum-update-and-reboot
diff --git a/deployment/playbooks/crs-setup.yaml b/deployment/playbooks/crs-setup.yaml
new file mode 100644
index 00000000..e8ef6ad5
--- /dev/null
+++ b/deployment/playbooks/crs-setup.yaml
@@ -0,0 +1,163 @@
+---
+- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
+ 'noop.yaml',
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
+ ) }} hosts=single_master"
+ when: openshift_vers not in ['v3_6', 'v3_7']
+
+- hosts: single_master
+ tasks:
+ - name: Label common compute nodes to make them suitable for the Heketi POD
+ oc_label:
+ name: '{{ item }}'
+ kind: 'node'
+ state: 'add'
+ labels:
+ - key: 'glusterfs'
+ value: 'heketi-host'
+ - key: 'heketi'
+ value: 'heketi-host'
+ with_items: "{{ groups[cluster_id + '-compute'] }}"
+ ignore_errors: true
+
+# Prepare SSH key pair before CRS installation
+- hosts: localhost
+ ignore_errors: no
+ tasks:
+ - name: Define path for the SSH key
+ set_fact:
+ crs_ssh_keypath: "/root/.ssh/crs_nodes_{{
+ cluster_id + '_' + (999999999999999 | random | string ) }}"
+ - name: Generate SSH key pair for Heketi and CRS interactions
+ shell: "yes y| ssh-keygen -b 2048 -t rsa -f {{ crs_ssh_keypath }} -q -N ''"
+ args:
+ creates: "{{ crs_ssh_keypath }}"
+ - name: Read contents of the public SSH key
+ command: "cat {{ crs_ssh_keypath }}.pub"
+ register: crs_pub_key_raw
+ - name: Save public SSH key data to the variable
+ set_fact:
+ crs_pub_key: "{{ crs_pub_key_raw.stdout_lines[0].strip() }}"
+ - name: Copy public SSH key onto CRS nodes
+ shell: "echo {{ crs_pub_key }} >> /root/.ssh/authorized_keys"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ with_items: "{{ groups[cluster_id + '-crs'] }}"
+ - name: Set var with SSH key path for master nodes
+ set_fact:
+ crs_ssh_keypath: "{{ crs_ssh_keypath }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ with_items: "{{ groups[cluster_id + '-master'] }}"
+
+# Run CRS installation
+- hosts: single_master
+ tasks:
+ - name: Perform actions on master node which are required to install CRS
+ import_role:
+ name: openshift_storage_glusterfs
+ vars:
+ openshift_storage_glusterfs_name: 'storage'
+ openshift_storage_glusterfs_namespace: 'storage'
+ openshift_storage_glusterfs_is_native: false
+ openshift_storage_glusterfs_heketi_is_native: true
+ openshift_storage_glusterfs_heketi_admin_key: "{{
+ (dp_tool_heketi_admin_key.strip() != '') |
+ ternary(dp_tool_heketi_admin_key.strip(), omit) }}"
+ openshift_storage_glusterfs_heketi_user_key: "{{
+ (dp_tool_heketi_user_key.strip() != '') |
+ ternary(dp_tool_heketi_user_key.strip(), omit) }}"
+ openshift_storage_glusterfs_storageclass: true
+ openshift_storage_glusterfs_block_storageclass: true
+ openshift_storage_glusterfs_s3_deploy: false
+ openshift_storage_glusterfs_nodeselector: 'role=compute'
+ openshift_storage_glusterfs_heketi_executor: 'ssh'
+ openshift_storage_glusterfs_heketi_ssh_keyfile: "{{ crs_ssh_keypath }}"
+ - name: Allow expansion of PVCs that use the 'glusterfs' storageclass.
+ oc_edit:
+ kind: sc
+ name: glusterfs-{{ glusterfs_name }}
+ content:
+ allowVolumeExpansion: true
+
+- name: Get IP address of the node with router
+ hosts: single_master
+ tasks:
+ - command: "oc get endpoints router -o=custom-columns=:.subsets[*].addresses[0].ip -n default"
+ register: router_get
+ - set_fact:
+ router_ip: "{{ router_get.stdout_lines[1].strip() }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['allnodes'] }}"
+
+- name: Restart dnsmasq on all the nodes to apply all the changes we made
+ hosts: allnodes
+ tasks:
+ - lineinfile:
+ path: /etc/dnsmasq.conf
+ line: "address=/.{{ app_dns_prefix }}.{{ dns_zone }}/{{ router_ip }}"
+ - service:
+ name: dnsmasq
+ state: restarted
+
+- name: Get IPv4 address of the main master node
+ hosts: single_master
+ tasks:
+ - command: "python -c \"import yaml ;
+ config = yaml.load(open('/etc/origin/master/master-config.yaml', 'r'));
+ print(config['kubernetesMasterConfig']['masterIP'])
+ \""
+ register: master_ipv4
+ - set_fact:
+ master_ipv4: "{{ master_ipv4.stdout_lines[0] }}"
+ - command: "oc patch svc heketi-storage
+ --namespace storage
+ -p '{\"spec\":{\"externalIPs\":[\"{{ master_ipv4 }}\"]}}'"
+ run_once: true
+
+# The following updates the config file
+# which is required by automated tests from the 'cns-automation' repo
+
+- name: Update 'cns-automation' config file
+ hosts: localhost
+ tasks:
+ - set_fact:
+ master_ipv4: "{{ hostvars[groups['single_master'][0]].master_ipv4 }}"
+ - yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: openshift.storage_project_name
+ value: "storage"
+ - key: openshift.heketi_config.heketi_dc_name
+ value: "heketi-storage"
+ - key: openshift.heketi_config.heketi_service_name
+ value: "heketi-storage"
+ - key: openshift.heketi_config.heketi_client_node
+ value: "{{ master_ipv4 }}"
+ - key: openshift.heketi_config.heketi_server_url
+ value: "http://{{ master_ipv4 }}:8080"
+ - key: openshift.heketi_config.heketi_cli_user
+ value: 'admin'
+ - key: openshift.heketi_config.heketi_cli_key
+ value: "{{ dp_tool_heketi_admin_key }}"
+ - key: openshift.dynamic_provisioning.storage_classes
+ value:
+ file_storage_class:
+ provisioner: "kubernetes.io/glusterfs"
+ resturl: "http://{{ master_ipv4 }}:8080"
+ restuser: "admin"
+ secretnamespace: "storage"
+ volumenameprefix: "autotests-file"
+ block_storage_class:
+ provisioner: "gluster.org/glusterblock"
+ resturl: "http://{{ master_ipv4 }}:8080"
+ restuser: "admin"
+ restsecretnamespace: "storage"
+ volumenameprefix: "autotests-block"
+ hacount: "3"
+ chapauthenabled: "true"
+ when: cns_automation_config_file_path | length > 0
+ run_once: true
diff --git a/deployment/playbooks/crs-storage.yaml b/deployment/playbooks/crs-storage.yaml
new file mode 100644
index 00000000..cee0da69
--- /dev/null
+++ b/deployment/playbooks/crs-storage.yaml
@@ -0,0 +1,12 @@
+---
+- include: prod-ose-crs.yaml
+ tags: ['vms']
+
+- include: crs-node-setup.yaml
+ tags: [ 'node-setup' ]
+
+- include: crs-setup.yaml
+ tags: [ 'node-setup']
+
+- include: cleanup-crs.yaml
+ tags: ['clean']
diff --git a/deployment/playbooks/get_ocp_info.yaml b/deployment/playbooks/get_ocp_info.yaml
new file mode 100644
index 00000000..dfce216c
--- /dev/null
+++ b/deployment/playbooks/get_ocp_info.yaml
@@ -0,0 +1,231 @@
+---
+# Run this playbook as follows:
+# $ ansible-playbook -i ocp-master-node-hostname-or-ip, get_ocp_info.yaml
+
+# Ansible runner machine info
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Generate name for data file
+ set_fact:
+ data_file_path: "{{ lookup('env', 'VIRTUAL_ENV') }}/../../ocp_{{
+ (groups['all'][0]).replace('.', '_')
+ }}_info.yaml"
+ - name: Print data file name
+ debug:
+ msg: "Data file path is '{{ data_file_path }}'"
+ - name: "[Re]Create file where we are going to store gathered data"
+ copy:
+ content: ""
+ dest: "{{ data_file_path }}"
+ mode: 0644
+ force: yes
+
+ - name: Get Linux kernel version of ansible runner
+ shell: "uname -a"
+ register: ansible_runner_linux_kernel_version
+ - name: Get Red Hat release info for ansible runner
+ shell: "cat /etc/redhat-release"
+ register: ansible_runner_rh_release
+ ignore_errors: yes
+ - name: Get ansible-playbook version from ansible runner
+ shell: "{{ lookup('env', 'VIRTUAL_ENV') }}/bin/ansible-playbook --version |
+ grep '^ansible-playbook' | awk '{print $2}'"
+ register: ansible_runner_ansible_playbook_version
+ - name: Get 'openshift-ansible' lib version used by ansible runner
+ shell: "echo \"openshift-ansible-$(cat {{
+ lookup('env', 'VIRTUAL_ENV')
+ }}/usr/share/ansible/openshift-ansible/.tito/packages/openshift-ansible | awk '{print $1}')\""
+ register: ansible_runner_oc_lib_version
+ - name: Write ansible runner data to the data file
+ yedit:
+ src: "{{ data_file_path }}"
+ state: present
+ backup: false
+ edits:
+ - key: 01_ansible_runner
+ value:
+ Linux kernel version: "{{ ansible_runner_linux_kernel_version.stdout_lines }}"
+ Red Hat release info: "{{
+ ansible_runner_rh_release.stdout_lines or
+ 'File /etc/redhat-release was not found. Not RHEL machine?' }}"
+ ansible-playbook version: "{{ ansible_runner_ansible_playbook_version.stdout_lines }}"
+ openshift-ansible lib version: "{{ ansible_runner_oc_lib_version.stdout_lines }}"
+
+# === Master node info ===
+- hosts: all[0]
+ gather_facts: no
+ vars:
+ master_package_list:
+ - docker
+ - heketi
+ master_service_list:
+ - docker
+ - multipathd
+ gluster_pod_package_list:
+ - gluster
+ - heketi
+ - targetcli
+ - gluster-block
+ - tcmu-runner
+ - python-configshell
+ - python-rtslib
+ gluster_pod_service_list:
+ - glusterd
+ - heketi
+ - gluster-blockd
+ - tcmu-runner
+ heketi_pod_package_list:
+ - gluster
+ - heketi
+ # NOTE(vponomar): we do not process list of Heketi POD services for 2 reasons:
+ # 1) No requirement to get status of any of services on Heketi POD.
+ # 2) 'systemctl' does not work on it.
+ tasks:
+ - name: Get Linux kernel version of the master node
+ shell: "uname -a"
+ register: master_linux_kernel_version
+ - name: Get Red Hat release info for the master node
+ shell: "cat /etc/redhat-release"
+ register: master_rh_release
+ - name: Create grep filter with all the packages we are interested in
+ set_fact:
+ package_filter: "{{ package_filter | default('grep') + ' -e ' + item }}"
+ with_items: "{{ master_package_list }}"
+ - name: Get list of installed packages we are interested in
+ shell: "rpm -qa | {{ package_filter }}"
+ register: master_packages
+ - name: Get status of services on OCP Master node
+ shell: "systemctl list-units {{ master_service_list | join('.service ') }}.service
+ --type=service --all --no-pager --no-legend"
+ register: master_services
+ - name: Get OpenShift client version
+ shell: "oc version | grep -e 'oc ' -e 'openshift' -e 'kube'"
+ register: master_oc_version
+ - name: Get list of OCP nodes
+ shell: "oc get nodes"
+ register: master_ocp_nodes
+ - name: Get info about all the docker images used in OCP cluster
+ shell: "oc get pods --all-namespaces
+ -o=custom-columns=:.status.containerStatuses[*].image | grep -v -e '^$' | uniq"
+ register: master_image_info
+ - name: Write master data to the data file
+ delegate_to: localhost
+ yedit:
+ src: "{{ hostvars['localhost']['data_file_path'] }}"
+ state: present
+ edits:
+ - key: 02_master
+ value:
+ Linux kernel version: "{{ master_linux_kernel_version.stdout_lines }}"
+ Red Hat release info: "{{ master_rh_release.stdout_lines }}"
+ List of Packages: "{{ master_packages.stdout_lines }}"
+ List of services: "{{ master_services.stdout_lines }}"
+ OC Version: "{{ master_oc_version.stdout_lines }}"
+ OCP nodes: "{{ master_ocp_nodes.stdout_lines }}"
+ Images info: "{{ master_image_info.stdout_lines }}"
+
+ # Heketi POD
+ - name: Get heketi POD
+ shell: "oc get pods --all-namespaces -l heketi
+ -o=custom-columns=:.metadata.name,:.metadata.namespace"
+ register: heketi_pods
+ - name: DEBUG HEKETI
+ debug:
+ msg: "{{ heketi_pods }}"
+ - block:
+ - name: Get storage release version from Heketi POD
+ shell: "oc exec {{ (heketi_pods.stdout_lines[1].split(' ') | list)[0] }}
+ --namespace {{ (heketi_pods.stdout_lines[1].split(' ') | list)[-1] }} --
+ cat /etc/redhat-storage-release"
+ register: heketi_pod_storage_release_version
+ - name: Get info about packages on Heketi POD
+ shell: "oc exec {{ (heketi_pods.stdout_lines[1].split(' ') | list)[0] }}
+ --namespace {{ (heketi_pods.stdout_lines[1].split(' ') | list)[-1] }} --
+ rpm -qa | grep -e {{ heketi_pod_package_list | join(' -e ') }}"
+ register: heketi_pod_packages
+ - name: Write Heketi data to the data file
+ delegate_to: localhost
+ yedit:
+ src: "{{ hostvars['localhost']['data_file_path'] }}"
+ state: present
+ edits:
+ - key: 03_heketi_pod
+ value:
+ Storage release version: "{{ heketi_pod_storage_release_version.stdout_lines }}"
+ List of Packages: "{{ heketi_pod_packages.stdout_lines }}"
+ when: "{{ ((heketi_pods.stdout_lines | join('')).strip() | length) > 0 }}"
+
+ # Gluster PODs
+ - name: Get list of Gluster PODs
+ shell: "oc get pods --all-namespaces -l glusterfs-node
+ -o=custom-columns=:.metadata.name,:.metadata.namespace"
+ register: gluster_pods
+ - name: DEBUG GLUSTER
+ debug:
+ msg: "{{ gluster_pods }}"
+ - block:
+ - name: Get storage release version from Gluster PODs
+ shell: "oc exec {{ (item.split(' ') | list)[0] }}
+ --namespace {{ (item.split(' ') | list)[-1] }} --
+ cat /etc/redhat-storage-release"
+ with_items: "{{ gluster_pods.stdout_lines[1:] }}"
+ register: gluster_pod_storage_release_version_results
+ - name: Process gluster PODs storage release versions results
+ set_fact:
+ gluster_pod_storage_release_version_processed: "{{
+ gluster_pod_storage_release_version_processed | default({}) | combine(
+ {(item.item.strip().split(' ')[0]): item.stdout_lines},
+ recursive=True
+ ) }}"
+ with_items: "{{ gluster_pod_storage_release_version_results.results }}"
+ - name: Get info about packages on Gluster PODs
+ shell: "oc exec {{ (item.split(' ') | list)[0] }}
+ --namespace {{ (item.split(' ') | list)[-1] }} --
+ rpm -qa | grep -e {{ gluster_pod_package_list | join(' -e ') }}"
+ with_items: "{{ gluster_pods.stdout_lines[1:] }}"
+ register: gluster_pod_package_list_results
+ - name: Process gluster PODs package lists results
+ set_fact:
+ gluster_pod_package_list_processed: "{{
+ gluster_pod_package_list_processed | default({}) | combine(
+ {(item.item.strip().split(' ')[0]): item.stdout_lines},
+ recursive=True
+ ) }}"
+ with_items: "{{ gluster_pod_package_list_results.results }}"
+ - name: Get info about services on Gluster PODs
+ shell: "oc exec {{ (item.split(' ') | list)[0] }}
+ --namespace {{ (item.split(' ') | list)[-1] }} --
+ systemctl list-units {{ gluster_pod_service_list | join('.service ') }}.service
+ --type=service --all --no-pager --no-legend"
+ with_items: "{{ gluster_pods.stdout_lines[1:] }}"
+ register: gluster_pod_service_list_results
+ - name: Process gluster PODs service lists results
+ set_fact:
+ gluster_pod_service_list_processed: "{{
+ gluster_pod_service_list_processed | default({}) | combine(
+ {(item.item.strip().split(' ')[0]): item.stdout_lines},
+ recursive=True
+ ) }}"
+ with_items: "{{ gluster_pod_service_list_results.results }}"
+ - name: Write Gluster PODs data to the data file
+ delegate_to: localhost
+ yedit:
+ src: "{{ hostvars['localhost']['data_file_path'] }}"
+ state: present
+ edits:
+ - key: 04_gluster_pods
+ value:
+ Storage release version: "{{ gluster_pod_storage_release_version_processed }}"
+ List of Packages: "{{ gluster_pod_package_list_processed }}"
+ List of Services: "{{ gluster_pod_service_list_processed }}"
+ when: "{{ ((gluster_pods.stdout_lines | join('')).strip() | length) > 0 }}"
+
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - shell: "cat {{ data_file_path }}"
+ register: data_file_content
+ - name: Print gathered data
+ debug:
+ msg: "{{ data_file_content.stdout_lines }}"
diff --git a/deployment/playbooks/library/rpm_q.py b/deployment/playbooks/library/rpm_q.py
new file mode 120000
index 00000000..43f43786
--- /dev/null
+++ b/deployment/playbooks/library/rpm_q.py
@@ -0,0 +1 @@
+/usr/share/ansible/openshift-ansible/library/rpm_q.py \ No newline at end of file
diff --git a/deployment/playbooks/library/vmware_folder.py b/deployment/playbooks/library/vmware_folder.py
new file mode 100644
index 00000000..399f1d04
--- /dev/null
+++ b/deployment/playbooks/library/vmware_folder.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Davis Phillips davis.phillips@gmail.com
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_folder
+short_description: Add/remove folders to/from vCenter
+description:
+ - This module can be used to add/remove a folder to/from vCenter
+version_added: 2.3
+author: "Davis Phillips (@dav1x)"
+notes:
+ - Tested on vSphere 6.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter:
+ description:
+ - Name of the datacenter to add the host
+ required: True
+ cluster:
+ description:
+ - Name of the cluster to add the host
+ required: True
+ folder:
+ description:
+ - Folder name to manage
+ required: True
+ hostname:
+ description:
+ - ESXi hostname to manage
+ required: True
+ username:
+ description:
+ - ESXi username
+ required: True
+ password:
+ description:
+ - ESXi password
+ required: True
+ state:
+ description:
+ - Add or remove the folder
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Create a folder
+ - name: Add a folder to vCenter
+ vmware_folder:
+ hostname: vcsa_host
+ username: vcsa_user
+ password: vcsa_pass
+ datacenter: datacenter
+ cluster: cluster
+ folder: folder
+ state: present
+'''
+
+RETURN = """
+instance:
+ description: metadata about the new folder
+ returned: always
+ type: dict
+ sample: None
+"""
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils.vmware import get_all_objs, connect_to_api, vmware_argument_spec, find_datacenter_by_name, \
+ find_cluster_by_name_datacenter, wait_for_task
+from ansible.module_utils.basic import AnsibleModule
+
+class VMwareFolder(object):
+ def __init__(self, module):
+ self.module = module
+ self.datacenter = module.params['datacenter']
+ self.cluster = module.params['cluster']
+ self.folder = module.params['folder']
+ self.hostname = module.params['hostname']
+ self.username = module.params['username']
+ self.password = module.params['password']
+ self.state = module.params['state']
+ self.dc_obj = None
+ self.cluster_obj = None
+ self.host_obj = None
+ self.folder_obj = None
+ self.folder_name = None
+ self.folder_expanded = None
+ self.folder_full_path = []
+ self.content = connect_to_api(module)
+
+ def find_host_by_cluster_datacenter(self):
+ self.dc_obj = find_datacenter_by_name(self.content, self.datacenter)
+ self.cluster_obj = find_cluster_by_name_datacenter(self.dc_obj, self.cluster)
+
+ for host in self.cluster_obj.host:
+ if host.name == self.hostname:
+ return host, self.cluster
+
+ return None, self.cluster
+
+ def select_folder(self, host):
+ fold_obj = None
+ self.folder_expanded = self.folder.split("/")
+ last_e = self.folder_expanded.pop()
+ fold_obj = self.get_obj([vim.Folder],last_e)
+ if fold_obj:
+ return fold_obj
+ if fold_obj is None:
+ return fold_obj
+
+ def get_obj(self, vimtype, name, return_all = False):
+ obj = list()
+ container = self.content.viewManager.CreateContainerView(
+ self.content.rootFolder, vimtype, True)
+
+ for c in container.view:
+ if name in [c.name, c._GetMoId()]:
+ if return_all is False:
+ return c
+ break
+ else:
+ obj.append(c)
+
+ if len(obj) > 0:
+ return obj
+ else:
+ # for backwards-compat
+ return None
+
+ def process_state(self):
+ try:
+ folder_states = {
+ 'absent': {
+ 'present': self.state_remove_folder,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_add_folder,
+ }
+ }
+
+ folder_states[self.state][self.check_folder_state()]()
+
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg = runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg = method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg = str(e))
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed = False)
+
+ def state_remove_folder(self):
+ changed = True
+ result = None
+ self.folder_expanded = self.folder.split("/")
+ f = self.folder_expanded.pop()
+ task = self.get_obj([vim.Folder],f).Destroy()
+
+ try:
+ success, result = wait_for_task(task)
+
+ except:
+ self.module.fail_json(msg = "Failed to remove folder '%s' '%s'" % (self.folder,folder))
+
+ self.module.exit_json(changed = changed, result = str(result))
+
+ def state_add_folder(self):
+ changed = True
+ result = None
+
+ self.dc_obj = find_datacenter_by_name(self.content, self.datacenter)
+ self.cluster_obj = find_cluster_by_name_datacenter(self.dc_obj, self.cluster)
+ self.folder_expanded = self.folder.split("/")
+ index = 0
+ for f in self.folder_expanded:
+ if not self.get_obj([vim.Folder],f):
+ if index == 0:
+ #First object gets created on the datacenter
+ task = self.dc_obj.vmFolder.CreateFolder(name=f)
+ else:
+ parent_f = self.get_obj([vim.Folder],self.folder_expanded[index - 1])
+ task = parent_f.CreateFolder(name=f)
+ index = index + 1
+
+ self.module.exit_json(changed = changed)
+
+ def check_folder_state(self):
+
+ self.host_obj, self.cluster_obj = self.find_host_by_cluster_datacenter()
+ self.folder_obj = self.select_folder(self.host_obj)
+
+ if self.folder_obj is None:
+ return 'absent'
+ else:
+ return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(datacenter = dict(required = True, type = 'str'),
+ cluster = dict(required = True, type = 'str'),
+ folder = dict(required=True, type='str'),
+ hostname = dict(required = True, type = 'str'),
+ username = dict(required = True, type = 'str'),
+ password = dict(required = True, type = 'str', no_log = True),
+ state = dict(default = 'present', choices = ['present', 'absent'], type = 'str')))
+
+ module = AnsibleModule(argument_spec = argument_spec, supports_check_mode = True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg = 'pyvmomi is required for this module')
+
+ vmware_folder = VMwareFolder(module)
+ vmware_folder.process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/deployment/playbooks/library/vmware_resource_pool.py b/deployment/playbooks/library/vmware_resource_pool.py
new file mode 100644
index 00000000..b4b891ee
--- /dev/null
+++ b/deployment/playbooks/library/vmware_resource_pool.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Davis Phillips davis.phillips@gmail.com
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_resource_pool
+short_description: Add/remove resource pools to/from vCenter
+description:
+ - This module can be used to add/remove a resource pool to/from vCenter
+version_added: 2.3
+author: "Davis Phillips (@dav1x)"
+notes:
+ - Tested on vSphere 6.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter:
+ description:
+ - Name of the datacenter to add the host
+ required: True
+ cluster:
+ description:
+ - Name of the cluster to add the host
+ required: True
+ resource_pool:
+ description:
+ - Resource pool name to manage
+ required: True
+ hostname:
+ description:
+ - ESXi hostname to manage
+ required: True
+ username:
+ description:
+ - ESXi username
+ required: True
+ password:
+ description:
+ - ESXi password
+ required: True
+ cpu_expandable_reservations:
+ description:
+ - In a resource pool with an expandable reservation, the reservation on a resource pool can grow beyond the specified value.
+ default: True
+ cpu_reservation:
+ description:
+ - Amount of resource that is guaranteed available to the virtual machine or resource pool.
+ default: 0
+ cpu_limit:
+ description:
+ - The utilization of a virtual machine/resource pool will not exceed this limit, even if there are available resources.
+ default: -1 (No limit)
+ cpu_shares:
+ description:
+ - CPU shares are used in case of resource contention.
+ choices:
+ - high
+ - custom
+ - low
+ - normal
+ default: Normal
+ mem_expandable_reservations:
+ description:
+ - In a resource pool with an expandable reservation, the reservation on a resource pool can grow beyond the specified value.
+ default: True
+ mem_reservation:
+ description:
+ - Amount of resource that is guaranteed available to the virtual machine or resource pool.
+ default: 0
+ mem_limit:
+ description:
+ - The utilization of a virtual machine/resource pool will not exceed this limit, even if there are available resources.
+ default: -1 (No limit)
+ mem_shares:
+ description:
+ - Memory shares are used in case of resource contention.
+ choices:
+ - high
+ - custom
+ - low
+ - normal
+ default: Normal
+ state:
+ description:
+ - Add or remove the resource pool
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Create a resource pool
+ - name: Add resource pool to vCenter
+ vmware_resource_pool:
+ hostname: vcsa_host
+ username: vcsa_user
+ password: vcsa_pass
+ datacenter: datacenter
+ cluster: cluster
+ resource_pool: resource_pool
+ mem_shares: normal
+ mem_limit: -1
+ mem_reservation: 0
+ mem_expandable_reservations: True
+ cpu_shares: normal
+ cpu_limit: -1
+ cpu_reservation: 0
+ cpu_expandable_reservations: True
+ state: present
+'''
+
+RETURN = """
+instance:
+ description: metadata about the new resource pool
+ returned: always
+ type: dict
+ sample: None
+"""
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils.vmware import get_all_objs, connect_to_api, vmware_argument_spec, find_datacenter_by_name, \
+ find_cluster_by_name_datacenter, wait_for_task
+from ansible.module_utils.basic import AnsibleModule
+
+class VMwareResourcePool(object):
+ def __init__(self, module):
+ self.module = module
+ self.datacenter = module.params['datacenter']
+ self.cluster = module.params['cluster']
+ self.resource_pool = module.params['resource_pool']
+ self.hostname = module.params['hostname']
+ self.username = module.params['username']
+ self.password = module.params['password']
+ self.state = module.params['state']
+ self.mem_shares = module.params['mem_shares']
+ self.mem_limit = module.params['mem_limit']
+ self.mem_reservation = module.params['mem_reservation']
+ self.mem_expandable_reservations = module.params['mem_expandable_reservations']
+ self.cpu_shares = module.params['cpu_shares']
+ self.cpu_limit = module.params['cpu_limit']
+ self.cpu_reservation = module.params['cpu_reservation']
+ self.cpu_expandable_reservations = module.params['cpu_expandable_reservations']
+ self.dc_obj = None
+ self.cluster_obj = None
+ self.host_obj = None
+ self.resource_pool_obj = None
+ self.content = connect_to_api(module)
+
+ def find_host_by_cluster_datacenter(self):
+ self.dc_obj = find_datacenter_by_name(self.content, self.datacenter)
+ self.cluster_obj = find_cluster_by_name_datacenter(self.dc_obj, self.cluster)
+
+ for host in self.cluster_obj.host:
+ if host.name == self.hostname:
+ return host, self.cluster
+
+ return None, self.cluster
+
+
+ def select_resource_pool(self, host):
+ pool_obj = None
+
+ resource_pools = get_all_objs(self.content, [vim.ResourcePool])
+
+ pool_selections = self.get_obj(
+ [vim.ResourcePool],
+ self.resource_pool,
+ return_all = True
+ )
+ if pool_selections:
+ for p in pool_selections:
+ if p in resource_pools:
+ pool_obj = p
+ break
+ return pool_obj
+
+ def get_obj(self, vimtype, name, return_all = False):
+ obj = list()
+ container = self.content.viewManager.CreateContainerView(
+ self.content.rootFolder, vimtype, True)
+
+ for c in container.view:
+ if name in [c.name, c._GetMoId()]:
+ if return_all is False:
+ return c
+ break
+ else:
+ obj.append(c)
+
+ if len(obj) > 0:
+ return obj
+ else:
+ # for backwards-compat
+ return None
+
+ def process_state(self):
+ try:
+ rp_states = {
+ 'absent': {
+ 'present': self.state_remove_rp,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_add_rp,
+ }
+ }
+
+ rp_states[self.state][self.check_rp_state()]()
+
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg = runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg = method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg = str(e))
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed = False)
+
+ def state_remove_rp(self):
+ changed = True
+ result = None
+ resource_pool = self.select_resource_pool(self.host_obj)
+ try:
+ task = self.resource_pool_obj.Destroy()
+ success, result = wait_for_task(task)
+
+ except:
+ self.module.fail_json(msg = "Failed to remove resource pool '%s' '%s'" % (self.resource_pool,resource_pool))
+ self.module.exit_json(changed = changed, result = str(result))
+
+ def state_add_rp(self):
+ changed = True
+ result = None
+ root_resource_pool = None
+
+ rp_spec=vim.ResourceConfigSpec()
+ cpu_alloc=vim.ResourceAllocationInfo()
+ cpu_alloc.expandableReservation = self.cpu_expandable_reservations
+ cpu_alloc.limit = int(self.cpu_limit)
+ cpu_alloc.reservation = int(self.cpu_reservation)
+ cpu_alloc_shares = vim.SharesInfo()
+ cpu_alloc_shares.level = self.cpu_shares
+ cpu_alloc.shares = cpu_alloc_shares
+ rp_spec.cpuAllocation = cpu_alloc
+ mem_alloc = vim.ResourceAllocationInfo()
+ mem_alloc.limit = int(self.mem_limit)
+ mem_alloc.expandableReservation = self.mem_expandable_reservations
+ mem_alloc.reservation = int(self.mem_reservation)
+ mem_alloc_shares = vim.SharesInfo()
+ mem_alloc_shares.level = self.mem_shares
+ mem_alloc.shares = mem_alloc_shares
+ rp_spec.memoryAllocation = mem_alloc
+
+ self.dc_obj = find_datacenter_by_name(self.content, self.datacenter)
+ self.cluster_obj = find_cluster_by_name_datacenter(self.dc_obj, self.cluster)
+ rootResourcePool = self.cluster_obj.resourcePool
+ task = rootResourcePool.CreateResourcePool(self.resource_pool, rp_spec)
+
+ self.module.exit_json(changed = changed)
+
+ def check_rp_state(self):
+
+ self.host_obj, self.cluster_obj = self.find_host_by_cluster_datacenter()
+ self.resource_pool_obj = self.select_resource_pool(self.host_obj)
+
+ if self.resource_pool_obj is None:
+ return 'absent'
+ else:
+ return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(datacenter = dict(required = True, type = 'str'),
+ cluster = dict(required = True, type = 'str'),
+ resource_pool = dict(required=True, type='str'),
+ hostname = dict(required = True, type = 'str'),
+ username = dict(required = True, type = 'str'),
+ password = dict(required = True, type = 'str', no_log = True),
+ mem_shares = dict(type = 'str', default = "normal", choices = ['high','custom','normal', 'low']),
+ mem_limit = dict(type = 'int',default = "-1"),
+ mem_reservation = dict(type = 'int',default = "0"),
+ mem_expandable_reservations = dict(type = 'bool',default = "True"),
+ cpu_shares = dict(type = 'str', default = "normal", choices = ['high','custom','normal', 'low']),
+ cpu_limit = dict(type = 'int',default = "-1"),
+ cpu_reservation = dict(type = 'int',default = "0"),
+ cpu_expandable_reservations = dict(type = 'bool',default = "True"),
+ state = dict(default = 'present', choices = ['present', 'absent'], type = 'str')))
+
+ module = AnsibleModule(argument_spec = argument_spec, supports_check_mode = True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg = 'pyvmomi is required for this module')
+
+ vmware_rp = VMwareResourcePool(module)
+ vmware_rp.process_state()
+
+if __name__ == '__main__':
+ main()
diff --git a/deployment/playbooks/node-setup.yaml b/deployment/playbooks/node-setup.yaml
new file mode 100644
index 00000000..f2f531d3
--- /dev/null
+++ b/deployment/playbooks/node-setup.yaml
@@ -0,0 +1,47 @@
+---
+- include: "scaleup.yaml"
+ vars:
+ debug_level: 2
+ openshift_debug_level: "{{ debug_level }}"
+ openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
+ osm_controller_args:
+ osm_api_server_args:
+ openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
+ openshift_master_access_token_max_seconds: 2419200
+ openshift_master_api_port: "{{ console_port }}"
+ openshift_master_console_port: "{{ console_port }}"
+ osm_cluster_network_cidr: 172.16.0.0/16
+ openshift_registry_selector: "role=compute"
+ openshift_router_selector: "role=compute"
+ openshift_node_local_quota_per_fsgroup: 512Mi
+ openshift_master_cluster_method: native
+ openshift_cloudprovider_kind: vsphere
+ openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
+ openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
+ openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
+ openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
+ openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
+ openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
+ os_sdn_network_plugin_name: "{{ openshift_sdn }}"
+ deployment_type: "{{ deployment_type }}"
+ load_balancer_hostname: "{{ lb_host }}"
+ openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
+ openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
+ # 'openshift_node_groups' is required since OCP 3.10
+ openshift_node_groups:
+ - name: node-config-master
+ labels:
+ - 'node-role.kubernetes.io/master=true'
+ - 'role=master'
+ edits: []
+ - name: node-config-compute
+ labels:
+ - 'node-role.kubernetes.io/compute=true'
+ - 'node-role.kubernetes.io/infra=true'
+ - 'role=compute'
+ edits: []
+ - name: node-config-storage
+ labels:
+ - 'node-role.kubernetes.io/storage=true'
+ - 'role=storage'
+ edits: []
diff --git a/deployment/playbooks/noop.yaml b/deployment/playbooks/noop.yaml
new file mode 100644
index 00000000..94173aed
--- /dev/null
+++ b/deployment/playbooks/noop.yaml
@@ -0,0 +1,7 @@
+---
+- hosts: localhost
+ gather_facts: no
+ ignore_errors: no
+ tasks:
+ - debug:
+ msg: "No operation TASK for placeholder playbook."
diff --git a/deployment/playbooks/ocp-configure.yaml b/deployment/playbooks/ocp-configure.yaml
new file mode 100644
index 00000000..7a59f9ed
--- /dev/null
+++ b/deployment/playbooks/ocp-configure.yaml
@@ -0,0 +1,33 @@
+---
+- hosts: localhost
+ gather_facts: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ # Group systems
+ - instance-groups
+
+- hosts: master
+ gather_facts: yes
+ vars_files:
+ - vars/main.yaml
+ tasks:
+ - name: Enable Gluster 3 repo
+ import_role:
+ name: enable-gluster-repo
+ - name: Install heketi client for CNS and CRS needs
+ package:
+ name: heketi-client
+ state: latest
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+
+- hosts: single_master
+ gather_facts: no
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - instance-groups
+ - storage-class-configure
diff --git a/deployment/playbooks/ocp-end-to-end.yaml b/deployment/playbooks/ocp-end-to-end.yaml
new file mode 100644
index 00000000..58f0ca01
--- /dev/null
+++ b/deployment/playbooks/ocp-end-to-end.yaml
@@ -0,0 +1,15 @@
+---
+- include: setup.yaml
+ tags: ['setup']
+
+- include: prod.yaml
+ tags: ['prod']
+
+- include: ocp-install.yaml
+ tags: ['ocp-install']
+
+- include: ocp-configure.yaml
+ tags: ['ocp-configure']
+
+- include: clean.yaml
+ tags: ['clean']
diff --git a/deployment/playbooks/ocp-install.yaml b/deployment/playbooks/ocp-install.yaml
new file mode 100644
index 00000000..d8a5109e
--- /dev/null
+++ b/deployment/playbooks/ocp-install.yaml
@@ -0,0 +1,138 @@
+---
+- hosts: localhost
+ gather_facts: yes
+ ignore_errors: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ # Group systems
+ - instance-groups
+
+- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
+ 'prerequisite.yaml',
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
+ ) }}"
+ vars:
+ # 'openshift_node_groups' is required since OCP 3.10
+ openshift_node_groups:
+ - name: node-config-master
+ labels:
+ - 'node-role.kubernetes.io/master=true'
+ - 'role=master'
+ edits: []
+ - name: node-config-compute
+ labels:
+ - 'node-role.kubernetes.io/compute=true'
+ - 'node-role.kubernetes.io/infra=true'
+ - 'role=compute'
+ edits: []
+ - name: node-config-storage
+ labels:
+ - 'node-role.kubernetes.io/storage=true'
+ - 'role=storage'
+ edits: []
+
+- name: Include the openshift-ansible installer playbooks
+ include: "{{
+ lookup('env', 'VIRTUAL_ENV')
+ }}/usr/share/ansible/openshift-ansible/playbooks/{{
+ (openshift_vers in ['v3_6', 'v3_7']) |
+ ternary('byo/config.yml', 'deploy_cluster.yml')
+ }}"
+ vars:
+ openshift_release: "v3.{{ openshift_vers.split('_')[-1] }}"
+ debug_level: 2
+ console_port: 8443
+ openshift_debug_level: "{{ debug_level }}"
+ openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
+ # NOTE(vponomar): following two can be changed to "true" when
+ # https://github.com/openshift/openshift-ansible/issues/6086 is fixed
+ openshift_enable_service_catalog: false
+ template_service_broker_install: false
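+    # Online PVC resizing: the 'ExpandPersistentVolumes' feature gate is set on
+    # both the controller manager and the API server, and the matching
+    # 'PersistentVolumeClaimResize' admission plugin is enabled further below.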
+ osm_controller_args:
+ feature-gates:
+ - "ExpandPersistentVolumes=true"
+ cloud-provider:
+ - "vsphere"
+ cloud-config:
+ - "/etc/origin/cloudprovider/vsphere.conf"
+ osm_api_server_args:
+ feature-gates:
+ - "ExpandPersistentVolumes=true"
+ cloud-provider:
+ - "vsphere"
+ cloud-config:
+ - "/etc/origin/cloudprovider/vsphere.conf"
+ openshift_master_admission_plugin_config:
+ PersistentVolumeClaimResize:
+ configuration:
+ apiVersion: v1
+ disable: false
+ kind: DefaultAdmissionConfig
+ openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
+ openshift_master_access_token_max_seconds: 2419200
+ openshift_hosted_router_replicas: 1
+ openshift_hosted_registry_replicas: 1
+ openshift_master_api_port: "{{ console_port }}"
+ openshift_master_console_port: "{{ console_port }}"
+ openshift_node_local_quota_per_fsgroup: 512Mi
+ osm_cluster_network_cidr: 172.16.0.0/16
+ osm_use_cockpit: false
+ osm_default_node_selector: "role=compute"
+ openshift_registry_selector: "role=compute"
+ openshift_override_hostname_check: true
+ openshift_router_selector: "role=compute"
+ openshift_master_cluster_method: native
+ openshift_cloudprovider_kind: vsphere
+ openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
+ openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
+ openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
+ openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
+ openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
+ openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
+ wildcard_zone: "{{ app_dns_prefix }}.{{ dns_zone }}"
+ osm_default_subdomain: "{{ wildcard_zone }}"
+ openshift_master_default_subdomain: "{{osm_default_subdomain}}"
+ deployment_type: "{{ deployment_type }}"
+ load_balancer_hostname: "{{ lb_host }}"
+ openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
+ openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
+ os_sdn_network_plugin_name: "{{ openshift_sdn }}"
+ openshift_master_identity_providers:
+ - name: 'allow_all'
+ kind: 'AllowAllPasswordIdentityProvider'
+ login: True
+ challenge: True
+ # 'openshift_node_groups' is required for OCP3.10
+ openshift_node_groups:
+ - name: node-config-master
+ labels:
+ - 'node-role.kubernetes.io/master=true'
+ - 'role=master'
+ edits: []
+ - name: node-config-compute
+ labels:
+ - 'node-role.kubernetes.io/compute=true'
+ - 'node-role.kubernetes.io/infra=true'
+ - 'role=compute'
+ edits: []
+ - name: node-config-storage
+ labels:
+ - 'node-role.kubernetes.io/storage=true'
+ - 'role=storage'
+ edits: []
+
+- hosts: allnodes
+ gather_facts: no
+ ignore_errors: no
+ tasks:
+ - service:
+ name: dnsmasq
+ state: restarted
+
+- name: Run 'yum update' on all the nodes and then reboot them
+ hosts: localhost
+ gather_facts: no
+ roles:
+ - yum-update-and-reboot
diff --git a/deployment/playbooks/prerequisite.yaml b/deployment/playbooks/prerequisite.yaml
new file mode 100644
index 00000000..5c7cc399
--- /dev/null
+++ b/deployment/playbooks/prerequisite.yaml
@@ -0,0 +1,26 @@
+---
+- hosts: cluster_hosts
+ gather_facts: yes
+ become: yes
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - instance-groups
+ - package-repos
+
+- hosts: cluster_hosts
+ gather_facts: no
+ vars_files:
+ - vars/main.yaml
+ become: yes
+ roles:
+ - prerequisites
+
+- hosts: master
+ gather_facts: yes
+ vars_files:
+ - vars/main.yaml
+ become: yes
+ roles:
+ - master-prerequisites
+ - etcd-storage
diff --git a/deployment/playbooks/prod-ose-cns.yaml b/deployment/playbooks/prod-ose-cns.yaml
new file mode 100644
index 00000000..80a85f11
--- /dev/null
+++ b/deployment/playbooks/prod-ose-cns.yaml
@@ -0,0 +1,11 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: yes
+ become: no
+ vars_files:
+ - vars/main.yaml
+ roles:
+ # Group systems
+ - create-vm-cns-prod-ose
+ - setup-custom-domain-names-for-ansible-runner
diff --git a/deployment/playbooks/prod-ose-crs.yaml b/deployment/playbooks/prod-ose-crs.yaml
new file mode 100644
index 00000000..aa9537ab
--- /dev/null
+++ b/deployment/playbooks/prod-ose-crs.yaml
@@ -0,0 +1,11 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: yes
+ become: no
+ vars_files:
+ - vars/main.yaml
+ roles:
+ # Group systems
+ - create-vm-crs-prod-ose
+ - setup-custom-domain-names-for-ansible-runner
diff --git a/deployment/playbooks/prod.yaml b/deployment/playbooks/prod.yaml
new file mode 100644
index 00000000..3558468d
--- /dev/null
+++ b/deployment/playbooks/prod.yaml
@@ -0,0 +1,20 @@
+---
+- hosts: localhost
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - create-vm-prod-ose
+ - setup-custom-domain-names-for-ansible-runner
+
+- name: Fulfill OSE3 prerequisites on production hosts
+ hosts: production_group
+ vars_files:
+ - vars/main.yaml
+ roles:
+ - setup-custom-domain-names
+ - package-repos
+ - vmware-guest-setup
+ - cloud-provider-setup
+ - docker-storage-setup
+ - openshift-volume-quota
+ ignore_errors: yes
diff --git a/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
new file mode 100644
index 00000000..1b93ce22
--- /dev/null
+++ b/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+- name: create /etc/origin/cloudprovider
+ file:
+ state: directory
+ path: "{{ vsphere_conf_dir }}"
+
+- name: create the vsphere.conf file
+ template:
+ src: "{{ role_path }}/templates/vsphere.conf.j2"
+ dest: /etc/origin/cloudprovider/vsphere.conf
+ owner: root
+ group: root
+ mode: 0644
diff --git a/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2 b/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
new file mode 100644
index 00000000..8abe6e8c
--- /dev/null
+++ b/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
@@ -0,0 +1,11 @@
+[Global]
+user = "{{ vcenter_username }}"
+password = "{{ vcenter_password }}"
+server = "{{ vcenter_host }}"
+port = 443
+insecure-flag = 1
+datacenter = {{ vcenter_datacenter }}
+datastore = {{ vcenter_datastore }}
+working-dir = /{{ vcenter_datacenter }}/vm/{{ vcenter_folder }}/
+[Disk]
+scsicontrollertype = pvscsi
diff --git a/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
new file mode 100644
index 00000000..81511c01
--- /dev/null
+++ b/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
@@ -0,0 +1,3 @@
+---
+vsphere_conf_dir: /etc/origin/cloudprovider
+vsphere_conf: "{{vsphere_conf_dir }}/vsphere.conf"
diff --git a/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..392b5da1
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: Mark following nodes to be added to the 'new_nodes' group
+ set_fact:
+ is_add_nodes: true
+
+- name: Import common node creation role
+ import_role:
+ name: create-vm-prod-ose
diff --git a/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..e01f1dd0
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
@@ -0,0 +1,142 @@
+---
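+# Disk layout per CNS node: the first three disks are fixed system disks, and
+# every entry of the comma-separated 'container_storage_disks' variable adds
+# one more disk that is later exposed as a GlusterFS brick device.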
+- name: Define set of main disks (system and heketi)
+ set_fact:
+ disks_info: "{{ disks_info | default([
+ {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
+ }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ container_storage_disks.split(',') }}"
+
+- name: Define set of additional disks which will just be attached to the nodes
+ set_fact:
+ additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
+
+- name: Create CNS production VMs on vCenter
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: "{{ item.value.guestname }}"
+ cluster: "{{ vcenter_cluster}}"
+ datacenter: "{{ vcenter_datacenter }}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ template: "{{vcenter_template_name}}"
+ state: poweredon
+ wait_for_ip_address: true
+ folder: "/{{ vcenter_folder }}"
+ annotation: "{{ item.value.tag }}"
+ disk: "{{ disks_info }} + {{ additional_disks_info }}"
+ hardware:
+ memory_mb: 32768
+ networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
+ customization:
+ domain: "{{dns_zone}}"
+ dns_suffix: "{{dns_zone}}"
+ hostname: "{{ item.value.guestname}}"
+ with_dict: "{{host_inventory}}"
+ when: "item.value.guesttype in ['cns', ]"
+ async: "{{ 6 * 600 }}"
+ poll: 0
+ register: async_vms_creation
+
+- name: Check async status of VMs creation
+ async_status:
+ jid: "{{ async_result_item.ansible_job_id }}"
+ with_items: "{{ async_vms_creation.results }}"
+ loop_control:
+ loop_var: "async_result_item"
+ register: async_poll_results
+ until: async_poll_results.finished
+ retries: "{{ 6 * 100 }}"
+
+- name: Read info of newly created VMs
+ vmware_guest_tools_wait:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ vcenter_folder }}"
+ validate_certs: False
+ uuid: "{{ item.instance.hw_product_uuid }}"
+ with_items: "{{ async_poll_results.results }}"
+ register: facts
+
+- name: Map node names and their IP addresses
+ set_fact:
+ ip4addrs: "{{ ip4addrs | default({}) | combine(
+ {item.instance.hw_name: (
+ item.instance.hw_eth0.ipaddresses | ipv4 | first)},
+ recursive=True) }}"
+ hostnames_for_reboot: "{{
+ (hostnames_for_reboot | default([])) +
+ [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
+ with_items: "{{ facts.results }}"
+
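+# The role assumes the guest enumerates disks in attach order: the three system
+# disks become sda..sdc, so the first GlusterFS brick disk appears as /dev/sdd,
+# the next as /dev/sde, and so on.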
+- name: Define glusterfs devices
+ set_fact:
+ glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
+ {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
+ with_indexed_items: "{{ disks_info[3::] }}"
+
+- name: Define glusterfs additional devices
+ set_fact:
+ glusterfs_additional_devices: "{{
+ glusterfs_additional_devices | default([])
+ }} + {{
+ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
+ }}"
+ with_indexed_items: "{{ additional_disks_info }}"
+
+- name: Add CNS production VMs to inventory
+ add_host:
+ hostname: "{{ item.value.guestname }}"
+ ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
+ ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
+ groups: "{{ item.value.tag }}, new_nodes, storage, cns, glusterfs"
+ openshift_node_group_name: "node-config-storage"
+ # Following vars are for 'openshift_storage_glusterfs' role from
+ # 'openshift/openshift-ansible' repo
+ glusterfs_devices: "{{ glusterfs_devices }}"
+ glusterfs_hostname: "{{ item.value.guestname }}"
+ glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
+ glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['cns', ]"
+
+# The following tasks update the config file required by automated tests
+# from the 'glusterfs-containers-tests' repo.
+
+- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
+ set_fact:
+ gluster_servers: "{{
+ gluster_servers | default({}) | combine({
+ ip4addrs[item.value.guestname]: {
+ 'manage': item.value.guestname,
+ 'storage': ip4addrs[item.value.guestname],
+ 'additional_devices': glusterfs_additional_devices,
+ }
+ })
+ }}"
+ with_dict: "{{ host_inventory }}"
+ when:
+ - item.value.guesttype in ['cns', ]
+ - cns_automation_config_file_path | length > 0
+
+- name: Update 'glusterfs-containers-tests' config file
+ yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: gluster_servers
+ value: "{{ gluster_servers }}"
+ when: gluster_servers is defined
diff --git a/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..05aa63bb
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
@@ -0,0 +1,143 @@
+---
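+# Disk layout matches the CNS role: three fixed system disks plus one extra
+# disk per entry in 'container_storage_disks', each of which later becomes a
+# GlusterFS brick device.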
+- name: Define set of main disks (system and heketi)
+ set_fact:
+ disks_info: "{{ disks_info | default([
+ {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
+ {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
+ }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ container_storage_disks.split(',') }}"
+
+- name: Define set of additional disks which will just be attached to the nodes
+ set_fact:
+ additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
+ [{'size_gb': (item.strip() | int),
+ 'type': container_storage_disk_type,
+ 'datastore': vcenter_datastore}]
+ }}"
+ with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
+
+- name: Create CRS production VMs on vCenter
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: "{{ item.value.guestname }}"
+ cluster: "{{ vcenter_cluster}}"
+ datacenter: "{{ vcenter_datacenter }}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ template: "{{vcenter_template_name}}"
+ state: poweredon
+ wait_for_ip_address: true
+ folder: "/{{ vcenter_folder }}"
+ annotation: "{{ cluster_id }}-crs"
+ disk: "{{ disks_info }} + {{ additional_disks_info }}"
+ hardware:
+ memory_mb: 32768
+ networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
+ customization:
+ domain: "{{dns_zone}}"
+ dns_suffix: "{{dns_zone}}"
+ hostname: "{{ item.value.guestname}}"
+ with_dict: "{{host_inventory}}"
+ when: "item.value.guesttype in ['crs', ]"
+ async: "{{ 6 * 600 }}"
+ poll: 0
+ register: async_vms_creation
+
+- name: Check async status of VMs creation
+ async_status:
+ jid: "{{ async_result_item.ansible_job_id }}"
+ with_items: "{{ async_vms_creation.results }}"
+ loop_control:
+ loop_var: "async_result_item"
+ register: async_poll_results
+ until: async_poll_results.finished
+ retries: "{{ 6 * 100 }}"
+
+- name: Read info of newly created VMs
+ vmware_guest_tools_wait:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ vcenter_folder }}"
+ validate_certs: False
+ uuid: "{{ item.instance.hw_product_uuid }}"
+ with_items: "{{ async_poll_results.results }}"
+ register: facts
+
+- name: Map node names and their IP addresses
+ set_fact:
+ ip4addrs: "{{ ip4addrs | default({}) | combine(
+ {item.instance.hw_name: (
+ item.instance.hw_eth0.ipaddresses | ipv4 | first)},
+ recursive=True) }}"
+ hostnames_for_reboot: "{{
+ (hostnames_for_reboot | default([])) +
+ [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
+ with_items: "{{ facts.results }}"
+
+- name: Define glusterfs devices
+ set_fact:
+ glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
+ {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
+ with_indexed_items: "{{ disks_info[3::] }}"
+
+- name: Define glusterfs additional devices
+ set_fact:
+ glusterfs_additional_devices: "{{
+ glusterfs_additional_devices | default([])
+ }} + {{
+ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
+ }}"
+ with_indexed_items: "{{ additional_disks_info }}"
+
+- name: Add CRS production VMs to inventory
+ add_host:
+ hostname: "{{ item.value.guestname }}"
+ ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
+ ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
+ openshift_node_group_name: "node-config-storage"
+ # old groups are: crs, production_group, {{cluster-id}}-crs
+ groups: "{{ cluster_id }}-crs, crs, storage, glusterfs"
+ # Following vars are for 'openshift_storage_glusterfs' role from
+ # 'openshift/openshift-ansible' repo
+ glusterfs_devices: "{{ glusterfs_devices }}"
+ glusterfs_hostname: "{{ item.value.guestname }}"
+ glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
+ glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['crs', ]"
+
+# The following tasks update the config file required by automated tests
+# from the 'glusterfs-containers-tests' repo.
+
+- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
+ set_fact:
+ gluster_servers: "{{
+ gluster_servers | default({}) | combine({
+ ip4addrs[item.value.guestname]: {
+ 'manage': item.value.guestname,
+ 'storage': ip4addrs[item.value.guestname],
+ 'additional_devices': glusterfs_additional_devices,
+ }
+ })
+ }}"
+ with_dict: "{{ host_inventory }}"
+ when:
+ - item.value.guesttype in ['crs', ]
+ - cns_automation_config_file_path | length > 0
+
+- name: Update 'glusterfs-containers-tests' config file
+ yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: gluster_servers
+ value: "{{ gluster_servers }}"
+ when: gluster_servers is defined
diff --git a/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
new file mode 100644
index 00000000..a0124348
--- /dev/null
+++ b/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
@@ -0,0 +1,157 @@
+---
+- name: Determine whether the following nodes need to be added to the "new_nodes" group
+ set_fact:
+ is_add_nodes: "{{ is_add_nodes | default(false) }}"
+
+- name: Define memory and disk parameters per node type
+ set_fact:
+ host_data:
+ master:
+ memory: 16384
+ disk:
+ - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ compute:
+ memory: "{{ ('cns' in container_storage) | ternary(32768, 8192) }}"
+ disk:
+ - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+ - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
+
+- name: Create production VMs on vCenter
+ vmware_guest:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: "{{ item.value.guestname }}"
+ cluster: "{{ vcenter_cluster}}"
+ datacenter: "{{ vcenter_datacenter }}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ template: "{{vcenter_template_name}}"
+ state: poweredon
+ wait_for_ip_address: true
+ folder: "/{{ vcenter_folder }}"
+ annotation: "{{ item.value.tag }}"
+ disk: "{{ host_data[item.value.guesttype].disk }}"
+ hardware:
+ memory_mb: "{{ host_data[item.value.guesttype].memory }}"
+ networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
+ customization:
+ domain: "{{dns_zone}}"
+ dns_suffix: "{{ dns_zone }}"
+ hostname: "{{ item.value.guestname }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['compute', 'master']"
+ async: "{{ 6 * 600 }}"
+ poll: 0
+ register: async_vms_creation
+
+- name: Check async status of VMs creation
+ async_status:
+ jid: "{{ async_result_item.ansible_job_id }}"
+ with_items: "{{ async_vms_creation.results }}"
+ loop_control:
+ loop_var: "async_result_item"
+ register: async_poll_results
+ until: async_poll_results.finished
+ retries: "{{ 6 * 100 }}"
+
+- name: Read info of newly created VMs
+ vmware_guest_tools_wait:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ vcenter_folder }}"
+ validate_certs: False
+ uuid: "{{ item.instance.hw_product_uuid }}"
+ with_items: "{{ async_poll_results.results }}"
+ register: facts
+
+- name: Map node names and their IP addresses
+ set_fact:
+ ip4addrs: "{{ ip4addrs | default({}) | combine(
+ {item.instance.hw_name: (
+ item.instance.hw_eth0.ipaddresses | ipv4 | first)},
+ recursive=True) }}"
+ hostnames_for_reboot: "{{
+ (hostnames_for_reboot | default([])) +
+ [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
+ with_items: "{{ facts.results }}"
+
+- name: Add production VMs to inventory
+ add_host:
+ hostname: "{{ item.value.guestname }}"
+ ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
+ ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
+ groups: "{{ item.value.tag }}, production_group{{ is_add_nodes | ternary(', new_nodes', '')}}"
+ openshift_node_group_name: "{{
+ (item.value.guesttype == 'master') | ternary('node-config-master',
+ 'node-config-compute') }}"
+ with_dict: "{{ host_inventory }}"
+ when: "item.value.guesttype in ['compute', 'master']"
+
+# The following tasks update the config file required by automated tests
+# from the 'glusterfs-containers-tests' repo.
+
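+# IP addresses are taken from vCenter guest facts for nodes that existed before
+# this run; for VMs created above those facts are absent, so the 'ip4addrs'
+# map built earlier is used as the fallback.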
+- name: Gather data about existing master nodes for tests config file
+ set_fact:
+ ocp_master_and_client_nodes: "{{
+ ocp_master_and_client_nodes | default({}) | combine({
+ (
+ ((
+ (hostvars[item].guest | default({'net': [{
+ 'network': vm_network,
+ 'ipaddress': [
+ ip4addrs[hostvars[item].inventory_hostname_short]
+ ]
+ }]})).net | selectattr('network', 'equalto', vm_network)
+ ) | list)[0].ipaddress | ipv4 | first
+ ): {
+ 'hostname': hostvars[item].inventory_hostname_short,
+ }
+ })
+ }}"
+ with_items: "{{ groups[cluster_id + '-master'] }}"
+ when: cns_automation_config_file_path | length > 0
+
+- name: Gather data about existing compute nodes for tests config file
+ set_fact:
+ ocp_compute_nodes: "{{
+ ocp_compute_nodes | default({}) | combine({
+ (
+ ((
+ (hostvars[item].guest | default({'net': [{
+ 'network': vm_network,
+ 'ipaddress': [
+ ip4addrs[hostvars[item].inventory_hostname_short]
+ ]
+ }]})).net | selectattr('network', 'equalto', vm_network)
+ ) | list)[0].ipaddress | ipv4 | first
+ ): {
+ 'hostname': hostvars[item].inventory_hostname_short,
+ }
+ })
+ }}"
+ with_items: "{{ groups[cluster_id + '-compute'] | default([]) }} "
+ when: cns_automation_config_file_path | length > 0
+
+- name: Update 'glusterfs-containers-tests' config file
+ yedit:
+ src: "{{ cns_automation_config_file_path }}"
+ state: present
+ edits:
+ - key: ocp_servers
+ value:
+ master: "{{ ocp_master_and_client_nodes }}"
+ client: "{{ ocp_master_and_client_nodes }}"
+ nodes: "{{ ocp_compute_nodes }}"
+ - key: openshift.heketi_config.heketi_client_node
+ value: "{{ ocp_master_and_client_nodes.keys()[0] }}"
+ - key: openshift.heketi_config.heketi_server_url
+ value: "http://{{ ocp_master_and_client_nodes.keys()[0] }}:8080"
+ when:
+ - ocp_master_and_client_nodes is defined
+ - ocp_compute_nodes is defined
diff --git a/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml b/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
new file mode 100644
index 00000000..dfe5e649
--- /dev/null
+++ b/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
@@ -0,0 +1,66 @@
+---
+- name: Clear yum cache
+ command: "yum clean all"
+ ignore_errors: true
+
+- name: Install required common rpms
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - 'iptables'
+ - 'iptables-services'
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+
+- name: Enable Gluster 3 repo
+ import_role:
+ name: enable-gluster-repo
+
+- name: Install required Gluster 3 rpms
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - 'redhat-storage-server'
+ - 'heketi-client'
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+
+- name: Install gluster-block package
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - 'gluster-block'
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+ ignore_errors: yes
+
+- name: Stop firewalld
+ service:
+ name: firewalld
+ state: stopped
+ enabled: no
+
+- name: Start Glusterd and iptables
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: true
+ with_items:
+ - iptables
+ - glusterd
+
+- name: Start gluster-blockd service
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: true
+ with_items:
+ - gluster-blockd
+ ignore_errors: yes
diff --git a/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml b/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
new file mode 100644
index 00000000..062f543a
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+docker_dev: "/dev/sdb"
+docker_vg: "docker-vol"
+docker_data_size: "95%VG"
+docker_dm_basesize: "3G"
+container_root_lv_name: "dockerlv"
+container_root_lv_mount_path: "/var/lib/docker"
diff --git a/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml b/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
new file mode 100644
index 00000000..d8fd457e
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+- name: remove any existing docker-storage config file
+ file:
+ path: /etc/sysconfig/docker-storage
+ state: absent
+
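+# RHEL 7.4 and newer get the overlay2-based configuration; older 7.x releases
+# fall back to the devicemapper template.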
+- block:
+ - name: create the docker-storage config file
+ template:
+ src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2"
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0644
+
+ when:
+ - ansible_distribution_version | version_compare('7.4', '>=')
+ - ansible_distribution == "RedHat"
+
+- block:
+ - name: create the docker-storage-setup config file
+ template:
+ src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0644
+
+ when:
+ - ansible_distribution_version | version_compare('7.4', '<')
+ - ansible_distribution == "RedHat"
+
+- name: start docker
+ service: name=docker state=started enabled=true
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
new file mode 100644
index 00000000..b5869fef
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
@@ -0,0 +1,4 @@
+DEVS="{{ docker_dev }}"
+VG="{{ docker_vg }}"
+DATA_SIZE="{{ docker_data_size }}"
+EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
new file mode 100644
index 00000000..61ba30af
--- /dev/null
+++ b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
@@ -0,0 +1,7 @@
+DEVS="{{ docker_dev }}"
+VG="{{ docker_vg }}"
+DATA_SIZE="{{ docker_data_size }}"
+STORAGE_DRIVER=overlay2
+CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
+CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
+CONTAINER_ROOT_LV_SIZE=100%FREE \ No newline at end of file
diff --git a/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml b/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
new file mode 100644
index 00000000..7236d77d
--- /dev/null
+++ b/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
@@ -0,0 +1,15 @@
+---
+- name: Enable main Gluster 3 repo with GA packages
+ command: "subscription-manager repos --enable=rh-gluster-3-for-rhel-7-server-rpms"
+# when: gluster_puddle_repo == ''
+
+- name: Create additional repo with downstream packages for Gluster 3
+ yum_repository:
+ name: "downstream-rh-gluster-3-for-rhel-7-server-rpms"
+ baseurl: "{{ gluster_puddle_repo }}"
+ description: "Downstream repo with development versions of packages for Gluster 3"
+ enabled: "yes"
+ gpgcheck: "no"
+ sslverify: "no"
+ cost: 990
+ when: gluster_puddle_repo != ''
diff --git a/deployment/playbooks/roles/etcd-storage/tasks/main.yaml b/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
new file mode 100644
index 00000000..fe13dc17
--- /dev/null
+++ b/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
@@ -0,0 +1,24 @@
+---
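+# The master VMs are created with a dedicated fourth disk; it is expected to
+# show up as /dev/sdd and is used here exclusively for etcd data.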
+- name: Create openshift volume group
+ lvg: vg=etcd_vg pvs=/dev/sdd
+
+- name: Create lvm volumes
+ lvol: vg=etcd_vg lv=etcd_lv size=95%FREE state=present shrink=no
+
+- name: Create local partition on lvm lv
+ filesystem:
+ fstype: xfs
+ dev: /dev/etcd_vg/etcd_lv
+
+- name: Create etcd mount point directory
+ file: path=/var/lib/etcd state=directory mode=0755
+
+- name: Mount the partition
+ mount:
+ name: /var/lib/etcd
+ src: /dev/etcd_vg/etcd_lv
+ fstype: xfs
+ state: present
+
+- name: Remount new partition
+ command: "mount -a"
diff --git a/deployment/playbooks/roles/gluster-ports/defaults/main.yaml b/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
new file mode 100644
index 00000000..fadcb096
--- /dev/null
+++ b/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+gluster_ports: ['24007', '24008', '2222', '49152:49664', '24010', '3260', '111']
+crs_ports: ['8080']
diff --git a/deployment/playbooks/roles/gluster-ports/tasks/main.yaml b/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
new file mode 100644
index 00000000..a3f0565b
--- /dev/null
+++ b/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
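+# The iptables module only changes the runtime rule set, so the rules are
+# written to /etc/sysconfig/iptables afterwards whenever something changed.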
+- name: open gluster ports
+ iptables:
+ chain: INPUT
+ destination_port: "{{ item }}"
+ jump: ACCEPT
+ ctstate: NEW
+ protocol: tcp
+ action: insert
+ match: tcp
+ with_items: "{{ gluster_ports }}"
+ when: groups['storage'] is defined and groups['storage'] != []
+ register: rule
+
+- name: save iptables
+ shell: iptables-save > /etc/sysconfig/iptables
+ when: rule is changed
+
+- name: open gluster ports
+ iptables:
+ chain: INPUT
+ destination_port: "{{ item }}"
+ ctstate: NEW
+ jump: ACCEPT
+ protocol: tcp
+ action: insert
+ match: tcp
+ with_items: "{{ crs_ports }}"
+ when: groups['crs'] is defined and groups['crs'] != []
+ register: heketi
+
+- name: save iptables
+ shell: iptables-save > /etc/sysconfig/iptables
+ when: heketi is changed
diff --git a/deployment/playbooks/roles/instance-groups/tasks/main.yaml b/deployment/playbooks/roles/instance-groups/tasks/main.yaml
new file mode 100644
index 00000000..f8da4217
--- /dev/null
+++ b/deployment/playbooks/roles/instance-groups/tasks/main.yaml
@@ -0,0 +1,155 @@
+---
+# create rhsm_user, rhsm_password, rhsm_subscription_pool and
+# rhsm_server for functionality with older rhsm_user
+- name: Set deprecated fact for rhel_subscription_user
+ set_fact:
+ rhsm_user: "{{ rhel_subscription_user }}"
+ when: rhel_subscription_user is defined
+
+- name: Set deprecated fact for rhel_subscription_pass
+ set_fact:
+ rhsm_password: "{{ rhel_subscription_pass }}"
+ when: rhel_subscription_pass is defined
+
+- name: Set deprecated fact for rhel_subscription_pool
+ set_fact:
+ rhsm_pool: "{{ rhel_subscription_pool }}"
+ when: rhel_subscription_pool is defined
+
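+# Most groupings below exist in two variants: for OCP 3.6-3.10 explicit
+# 'openshift_node_labels' are passed, while for newer releases only
+# 'openshift_node_group_name' is set and labels come from the node group
+# definitions.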
+- name: Add masters to requisite groups
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, masters, etcd, nodes, cluster_hosts, master
+ openshift_node_group_name: "node-config-master"
+ with_items: "{{ groups[cluster_id + '-master'] }}"
+ when:
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add masters to requisite groups
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, masters, etcd, nodes, cluster_hosts, master
+ openshift_node_group_name: "node-config-master"
+ openshift_node_labels:
+ role: master
+ node-role.kubernetes.io/master: true
+ with_items: "{{ groups[cluster_id + '-master'] }}"
+ when:
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add a master to the single master group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: single_master
+ openshift_node_group_name: "node-config-master"
+ with_items: "{{ groups[cluster_id + '-master'][0] }}"
+ when:
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add a master to the single master group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: single_master
+ openshift_node_group_name: "node-config-master"
+ openshift_node_labels:
+ role: master
+ node-role.kubernetes.io/master: true
+ with_items: "{{ groups[cluster_id + '-master'][0] }}"
+ when:
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add compute instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute
+ openshift_node_group_name: "node-config-compute"
+ with_items: "{{ groups[cluster_id + '-compute'] }}"
+ when:
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add compute instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute
+ openshift_node_group_name: "node-config-compute"
+ openshift_node_labels:
+ role: compute
+ node-role.kubernetes.io/compute: true
+ node-role.kubernetes.io/infra: true
+ with_items: "{{ groups[cluster_id + '-compute'] }}"
+ when:
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add new node instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, new_nodes
+ openshift_node_group_name: "node-config-compute"
+ with_items: "{{ groups.tag_provision_node | default([]) }}"
+ when:
+ - add_node is defined
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add new node instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes, new_nodes
+ openshift_node_group_name: "node-config-compute"
+ openshift_node_labels:
+ role: "{{ node_type }}"
+ node-role.kubernetes.io/compute: true
+ node-role.kubernetes.io/infra: true
+ with_items: "{{ groups.tag_provision_node | default([]) }}"
+ when:
+ - add_node is defined
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add cns instances to allnodes
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-storage'] | default([]) }}"
+
+- name: Add crs instances to allnodes
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: allnodes
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-crs'] | default([]) }}"
+
+- name: Add cns instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: nodes, cluster_hosts, schedulable_nodes, storage
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-storage'] }}"
+ when:
+ - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
+ - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+- name: Add cns instances to host group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: nodes, cluster_hosts, schedulable_nodes, storage
+ openshift_node_labels:
+ role: storage
+ node-role.kubernetes.io/storage: true
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-storage'] }}"
+ when:
+ - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
+ - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
+
+- name: Add crs nodes to the storage group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: storage, crs
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-crs'] }}"
+ when:
+ - "'crs' in container_storage and add_node is defined and 'storage' in node_type"
+
+- name: Add a crs node to the single crs group
+ add_host:
+ name: "{{ hostvars[item].inventory_hostname }}"
+ groups: single_crs
+ openshift_node_group_name: "node-config-storage"
+ with_items: "{{ groups[cluster_id + '-crs'][0] }}"
+ when:
+ - "'crs' in container_storage and add_node is defined and 'storage' in node_type"
diff --git a/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml b/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
new file mode 100644
index 00000000..de9230d1
--- /dev/null
+++ b/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Install git
+ package:
+ name: git
+ state: latest
+ when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
diff --git a/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
new file mode 100644
index 00000000..cd74c20e
--- /dev/null
+++ b/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+local_volumes_device: "/dev/sdc"
+local_volumes_fstype: "xfs"
+local_volumes_fsopts: "gquota"
+local_volumes_path: "/var/lib/origin/openshift.local.volumes"
diff --git a/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
new file mode 100644
index 00000000..df58fe80
--- /dev/null
+++ b/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create filesystem for /var/lib/origin/openshift.local.volumes
+ filesystem:
+ fstype: "{{ local_volumes_fstype }}"
+ dev: "{{ local_volumes_device }}"
+
+- name: Create local volumes directory
+ file:
+ path: "{{ local_volumes_path }}"
+ state: directory
+ recurse: yes
+
+- name: Create fstab entry
+ mount:
+ name: "{{ local_volumes_path }}"
+ src: "{{ local_volumes_device }}"
+ fstype: "{{ local_volumes_fstype }}"
+ opts: "{{ local_volumes_fsopts }}"
+ state: present
+
+- name: Mount fstab entry
+ mount:
+ name: "{{ local_volumes_path }}"
+ src: "{{ local_volumes_device }}"
+ fstype: "{{ local_volumes_fstype }}"
+ opts: "{{ local_volumes_fsopts }}"
+ state: mounted
diff --git a/deployment/playbooks/roles/package-repos/tasks/main.yaml b/deployment/playbooks/roles/package-repos/tasks/main.yaml
new file mode 100644
index 00000000..3492a9e4
--- /dev/null
+++ b/deployment/playbooks/roles/package-repos/tasks/main.yaml
@@ -0,0 +1,23 @@
+---
+- name: Import RHSM role
+ import_role:
+ name: rhsm
+
+- name: Evaluate OCP repo name
+ set_fact:
+ tmp_ose_repo_name: "rhel-7-server-ose-3.{{ openshift_vers.split('_')[-1] }}-rpms"
+
+- name: Disable OpenShift 3.X GA repo
+ command: "subscription-manager repos --disable={{ tmp_ose_repo_name }}"
+ when: (ose_puddle_repo != '') or ('crs' in group_names)
+
+- name: Create additional repo with downstream packages for OpenShift 3.X
+ yum_repository:
+ name: "downstream-{{ tmp_ose_repo_name }}"
+ baseurl: "{{ ose_puddle_repo }}"
+ description: "Downstream repo with development versions of packages for OpenShift"
+ enabled: "{{ (ose_puddle_repo != '') | ternary('yes', 'no') }}"
+ gpgcheck: "no"
+ sslverify: "no"
+ cost: 900
+ when: (ose_puddle_repo != '') and ('crs' not in group_names)
diff --git a/deployment/playbooks/roles/prerequisites/defaults/main.yaml b/deployment/playbooks/roles/prerequisites/defaults/main.yaml
new file mode 100644
index 00000000..1705ee4f
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/defaults/main.yaml
@@ -0,0 +1,6 @@
+---
+openshift_required_packages:
+- iptables
+- iptables-services
+- NetworkManager
+- docker{{ '-' + docker_version if docker_version is defined else '' }}
diff --git a/deployment/playbooks/roles/prerequisites/library/openshift_facts.py b/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
new file mode 120000
index 00000000..e0061bb7
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
@@ -0,0 +1 @@
+/usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py \ No newline at end of file
diff --git a/deployment/playbooks/roles/prerequisites/library/rpm_q.py b/deployment/playbooks/roles/prerequisites/library/rpm_q.py
new file mode 100644
index 00000000..3dec50fc
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/library/rpm_q.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tobias Florek <tob@butter.sh>
+# Licensed under the terms of the MIT License
+"""
+An ansible module to query the RPM database. For use, when yum/dnf are not
+available.
+"""
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import * # noqa: F403
+
+DOCUMENTATION = """
+---
+module: rpm_q
+short_description: Query the RPM database
+author: Tobias Florek
+options:
+ name:
+ description:
+ - The name of the package to query
+ required: true
+ state:
+ description:
+ - Whether the package is supposed to be installed or not
+ choices: [present, absent]
+ default: present
+"""
+
+EXAMPLES = """
+- rpm_q: name=ansible state=present
+- rpm_q: name=ansible state=absent
+"""
+
+RPM_BINARY = '/bin/rpm'
+
+
+def main():
+ """
+ Checks rpm -q for the named package and returns the installed packages
+ or None if not installed.
+ """
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # pylint: disable=invalid-name
+ rc, out, err = module.run_command([RPM_BINARY, '-q', name])
+
+ installed = out.rstrip('\n').split('\n')
+
+ if rc != 0:
+ if state == 'present':
+ module.fail_json(msg="%s is not installed" % name, stdout=out, stderr=err, rc=rc)
+ else:
+ module.exit_json(changed=False)
+ elif state == 'present':
+ module.exit_json(changed=False, installed_versions=installed)
+ else:
+ module.fail_json(msg="%s is installed", installed_versions=installed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/deployment/playbooks/roles/prerequisites/tasks/main.yaml b/deployment/playbooks/roles/prerequisites/tasks/main.yaml
new file mode 100644
index 00000000..a2686796
--- /dev/null
+++ b/deployment/playbooks/roles/prerequisites/tasks/main.yaml
@@ -0,0 +1,84 @@
+---
+- name: Gather facts
+ openshift_facts:
+ role: common
+
+- block:
+ - name: Clear yum cache
+ command: "yum clean all"
+ ignore_errors: true
+
+ - name: Install the required rpms
+ package:
+ name: "{{ item }}"
+ state: latest
+ with_items: "{{ openshift_required_packages }}"
+
+ - name: Start NetworkManager and network
+ service:
+ name: "{{ item }}"
+ state: restarted
+ enabled: true
+ with_items:
+ - NetworkManager
+ - network
+
+ - name: Determine if firewalld is installed
+ rpm_q:
+ name: "firewalld"
+ state: present
+ register: firewalld_installed
+ failed_when: false
+
+ - name: Stop firewalld
+ service:
+ name: firewalld
+ state: stopped
+ enabled: false
+ when:
+ - "{{ firewalld_installed.installed_versions | default([]) | length > 0 }}"
+
+ - name: Start iptables
+ service:
+ name: iptables
+ state: started
+ enabled: true
+
+ - name: Start docker
+ service:
+ name: docker
+ state: started
+ enabled: true
+
+ when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
+
+# Fail as early as possible if Atomic and old version of Docker
+- block:
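+  # The CURLY shell variable keeps a literal '{' out of the task definition so
+  # Ansible's Jinja templating does not choke on the Go-template braces used by
+  # 'docker version --format'.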
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - assert:
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Attempting to patch.
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+
+ rescue:
+ - name: Patching Atomic instances
+ shell: atomic host upgrade
+ register: patched
+
+ - name: Reboot when patched
+ shell: sleep 5 && shutdown -r now "Reboot due to Atomic Patching"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ when: patched.changed
+
+ - name: Wait for hosts to be back
+ pause:
+ seconds: 60
+ delegate_to: 127.0.0.1
+ when: patched.changed
+
+ when: (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
diff --git a/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml b/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
new file mode 100644
index 00000000..9b9f3b21
--- /dev/null
+++ b/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- block:
+ - name: Is the host already registered?
+ command: "subscription-manager list"
+ register: subscribed
+ ignore_errors: yes
+
+ - name: Unregister host
+ redhat_subscription:
+ state: absent
+ when: "'Subscribed' in subscribed.stdout"
+ ignore_errors: yes
+
+ when: ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/rhsm/defaults/main.yaml b/deployment/playbooks/roles/rhsm/defaults/main.yaml
new file mode 100644
index 00000000..3207411f
--- /dev/null
+++ b/deployment/playbooks/roles/rhsm/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+openshift_required_repos:
+- 'rhel-7-server-rpms'
+- 'rhel-7-server-extras-rpms'
+- 'rhel-7-fast-datapath-rpms'
diff --git a/deployment/playbooks/roles/rhsm/tasks/main.yaml b/deployment/playbooks/roles/rhsm/tasks/main.yaml
new file mode 100644
index 00000000..f793fb2f
--- /dev/null
+++ b/deployment/playbooks/roles/rhsm/tasks/main.yaml
@@ -0,0 +1,49 @@
+---
+- block:
+ - name: Allow rhsm a longer timeout to help out with subscription-manager
+ lineinfile:
+ dest: /etc/rhsm/rhsm.conf
+ line: 'server_timeout=600'
+ insertafter: '^proxy_password ='
+
+ - name: Is the system already registered?
+ command: "subscription-manager version"
+ register: subscribed
+
+ - name: Unregister system if registered
+ import_role:
+ name: rhsm-unregister
+ when:
+ - "'not registered' not in subscribed.stdout"
+
+ - name: Register system using Red Hat Subscription Manager
+ redhat_subscription:
+ state: present
+ username: "{{ rhsm_user | default(omit) }}"
+ password: "{{ rhsm_password | default(omit) }}"
+ pool: "{{ rhsm_pool | default(omit) }}"
+ server_hostname: "{{ rhsm_satellite | default(omit) }}"
+ when:
+ - "'not registered' in subscribed.stdout"
+ - rhsm_user is defined
+ - rhsm_user|trim != ''
+ register: rhn
+ until: rhn is succeeded
+ retries: 5
+
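+  # Repositories are reconciled via set differences: anything enabled that is
+  # not in 'openshift_required_repos' gets disabled, and anything required that
+  # is not yet enabled gets enabled.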
+ - name: Obtain currently enabled repos
+ shell: 'subscription-manager repos --list-enabled | sed -ne "s/^Repo ID:[^a-zA-Z0-9]*\(.*\)/\1/p"'
+ register: enabled_repos
+
+ - name: Disable repositories that should not be enabled
+ shell: "subscription-manager repos --disable={{ item }}"
+ with_items:
+ - "{{ enabled_repos.stdout_lines | difference(openshift_required_repos) }}"
+ when: provider is not defined
+
+ - name: Enable specified repositories not already enabled
+ command: "subscription-manager repos --enable={{ item }}"
+ with_items:
+ - "{{ openshift_required_repos | difference(enabled_repos.stdout_lines) }}"
+
+ when: ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
new file mode 100644
index 00000000..e9e06809
--- /dev/null
+++ b/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
@@ -0,0 +1,83 @@
+---
+# NOTE(vponomar): here we use 2 different sources of IP addresses:
+# 1) hostvars[item].guest.net exists for old nodes, that haven't been created
+# with this playbook run. Such nodes have detailed info in hostvars.
+# 2) hostvars[item].ansible_ssh_host is always correct IP address for newly
+# created nodes. For such nodes we pick it when variant 1 does not work.
+- name: Save matched hosts to temporary var
+ set_fact:
+ current_cluster_hosts: "{{
+ current_cluster_hosts | default([]) | union([{
+ 'name_short': hostvars[item].inventory_hostname_short,
+ 'name': hostvars[item].inventory_hostname,
+ 'net': (hostvars[item].guest | default({})).net | default(
+ [{'network': vm_network,
+ 'ipaddress': [hostvars[item].ansible_ssh_host]}])
+ }]) }}"
+ with_items: "{{ groups.all | select('match', ocp_hostname_prefix) | list }}"
+
+- name: Gather current cluster IP addresses
+ set_fact:
+ current_cluster_ips: "{{
+ current_cluster_ips | default({}) | combine({
+ (item.1.ipaddress | ipv4 | first): [item.0.name_short, item.0.name]
+ }) }}"
+ with_subelements: ["{{ current_cluster_hosts }}", net]
+ when: "item.1.network == vm_network"
+
+- name: Get current user home dir
+ shell: 'eval echo "~$USER"'
+ register: home_dir
+- name: Set hosts files paths
+ set_fact:
+ home_hosts_file: "{{ home_dir.stdout_lines[0] + '/.ssh/config' }}"
+ system_hosts_file: "/etc/hosts"
+- name: Check 'write' permissions for system hosts file
+ stat:
+ path: "{{ system_hosts_file }}"
+ register: stat_system_hosts
+
+- name: Update system hosts file if writeable
+ block:
+ - name: Delete old left-overs if they exist
+ lineinfile:
+ dest: "{{ system_hosts_file }}"
+ regexp: '{{ item.name_short }}'
+ state: absent
+ create: true
+ with_items: "{{ current_cluster_hosts }}"
+ - name: Add domain name mapping of new cluster nodes to the system hosts file
+ lineinfile:
+ dest: "{{ system_hosts_file }}"
+ line: '{{ item.key }} {{ item.value.0 }} {{ item.value.1 }}'
+ create: true
+ with_dict: "{{ current_cluster_ips }}"
+ when: "stat_system_hosts.stat.writeable"
+
+- name: Update user's SSH hosts file
+ block:
+ - name: Delete old left-overs if they exist
+ lineinfile:
+ path: "{{ home_hosts_file }}"
+ state: absent
+ regexp: "{{ item.key }}"
+ create: true
+ mode: '644'
+ with_dict: "{{ current_cluster_ips }}"
+ - name: Write line with option group
+ lineinfile:
+ dest: "{{ home_hosts_file }}"
+ state: present
+ line: "Host {{ item.value.0 }} {{ item.value.1 }}"
+ create: true
+ mode: '644'
+ with_dict: "{{ current_cluster_ips }}"
+ - name: Write line with hostname option
+ lineinfile:
+ dest: "{{ home_hosts_file }}"
+ state: present
+ line: " HostName {{ item.key }}"
+ insertafter: "Host {{ item.value.0 }} {{ item.value.1 }}"
+ create: true
+ mode: '644'
+ with_dict: "{{ current_cluster_ips }}"
diff --git a/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
new file mode 100644
index 00000000..d53fa43f
--- /dev/null
+++ b/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
@@ -0,0 +1,29 @@
+---
+- name: Import role with update of /etc/hosts file
+ import_role:
+ name: setup-custom-domain-names-for-ansible-runner
+
+- name: Create directory for dnsmasq config file if absent
+ file:
+ dest: /etc/dnsmasq.d
+ state: directory
+ mode: 0755
+
+- name: Create custom dnsmasq config file for current cluster
+ file:
+ dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
+ state: touch
+
+- name: Remove stale data from custom dnsmasq config file if it exists
+ lineinfile:
+ dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
+ regexp: "{{ item.value.0 }}"
+ state: absent
+ with_dict: "{{ current_cluster_ips }}"
+
+- name: Write data to custom dnsmasq config file
+ lineinfile:
+ dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
+ line: "address=/{{ item.value.0 }}/{{ item.key }}\naddress=/{{ item.value.1 }}/{{ item.key }}"
+ state: present
+ with_dict: "{{ current_cluster_ips }}"
diff --git a/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml b/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
new file mode 100644
index 00000000..d42484e0
--- /dev/null
+++ b/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
@@ -0,0 +1,22 @@
+---
+- name: Copy cloud provider storage class file
+ template:
+ src: cloud-provider-storage-class.yaml.j2
+ dest: ~/cloud-provider-storage-class.yaml
+
+- name: Copy cloud provider storage class file to single master
+ fetch:
+ src: ~/cloud-provider-storage-class.yaml
+ dest: ~/cloud-provider-storage-class.yaml
+ flat: yes
+
+- name: Switch to default project
+ command: oc project default
+
+- name: Check to see if storage class is already created
+ command: "oc get storageclass"
+ register: storage_class
+
+- name: Create storage class
+ command: "oc create -f ~/cloud-provider-storage-class.yaml"
+ when: "'{{ vcenter_datastore }}' not in storage_class.stdout"
diff --git a/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2 b/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
new file mode 100644
index 00000000..e31d53a4
--- /dev/null
+++ b/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
@@ -0,0 +1,8 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: "{{ vcenter_datastore }}"
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+ diskformat: zeroedthick
+ datastore: "{{ vcenter_datastore }}"
diff --git a/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
new file mode 100644
index 00000000..67898e0c
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: restart chronyd
+ service: name=chronyd state=restarted
+
+- name: restart networking
+ service: name=networking state=restarted
diff --git a/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
new file mode 100644
index 00000000..39dea695
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
@@ -0,0 +1,77 @@
+---
+- name: Determine if Atomic
+ stat: path=/run/ostree-booted
+ register: s
+ changed_when: false
+ check_mode: no
+
+- name: Init the is_atomic fact
+ set_fact:
+ is_atomic: false
+
+- name: Set the is_atomic fact
+ set_fact:
+ is_atomic: true
+ when: s.stat.exists
+
+- block:
+ - name: be sure all pre-req packages are installed
+ yum: name={{item}} state=installed
+ with_items:
+ - open-vm-tools
+ - PyYAML
+ - perl
+ - python-ipaddress
+ - net-tools
+ - chrony
+ - python-six
+ - iptables
+ - iptables-services
+ - docker{{ '-' + docker_version if docker_version is defined else '' }}
+ - dnsmasq
+ retries: 5
+ delay: 5
+ register: result
+ until: result is succeeded
+
+ - name: be sure openvmtools is running and enabled
+ service: name=vmtoolsd state=started enabled=yes
+
+ when:
+ - not is_atomic | bool
+ - ansible_distribution == "RedHat"
+
+- name: be sure chrony is configured
+ template: src=chrony.conf.j2 dest=/etc/chrony.conf
+ notify:
+ - restart chronyd
+
+- name: set link to localtime
+ command: timedatectl set-timezone {{timezone}}
+
+- name: be sure chronyd is running and enabled
+ service: name=chronyd state=started enabled=yes
+
+- block:
+ - name: (Atomic) Remove extra docker lv from root vg
+ lvol:
+ lv: docker-pool
+ vg: atomicos
+ state: absent
+ force: yes
+ - name: (Atomic) Grow root lv to fill vg
+ lvol:
+ lv: root
+ vg: atomicos
+ size: +100%FREE
+ - name: (Atomic) Grow root fs to match lv
+ filesystem:
+ dev: /dev/mapper/atomicos-root
+ fstype: xfs
+ resizefs: yes
+ - name: (Atomic) Force Ansible to re-gather disk facts
+ setup:
+ filter: 'ansible_mounts'
+ when:
+ - is_atomic | bool
+ - ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2 b/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
new file mode 100644
index 00000000..b8020cb0
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
@@ -0,0 +1,19 @@
+# This file is managed by Ansible
+
+server 0.rhel.pool.ntp.org
+server 1.rhel.pool.ntp.org
+server 2.rhel.pool.ntp.org
+server 3.rhel.pool.ntp.org
+
+driftfile /var/lib/chrony/drift
+makestep 10 3
+
+keyfile /etc/chrony.keys
+commandkey 1
+generatecommandkey
+
+noclientlog
+logchange 0.5
+
+logdir /var/log/chrony
+log measurements statistics tracking
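In this template, 'makestep 10 3' lets chronyd step the clock when the offset exceeds 10 seconds, but only during the first 3 clock updates after start. If a quick post-deployment check is desired, a task along these lines could be added (illustrative only, not part of the patch):

- name: Show chrony synchronisation status
  command: chronyc tracking
  register: chrony_tracking
  changed_when: false

- name: Print chrony tracking output
  debug:
    var: chrony_tracking.stdout_lines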
diff --git a/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
new file mode 100644
index 00000000..a951d622
--- /dev/null
+++ b/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
@@ -0,0 +1,3 @@
+---
+locale: en_US.UTF-8
+timezone: UTC
diff --git a/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml b/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
new file mode 100644
index 00000000..d53f5bd2
--- /dev/null
+++ b/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
@@ -0,0 +1,44 @@
+# NOTE(vponomar): this role should not be applied to the nodes
+# which are going to be rebooted; it delegates its tasks to them instead.
+---
+
+- block:
+ - name: Check that hostnames_for_reboot var is set and is not an empty list
+ fail:
+ msg: "Role 'yum-update-and-reboot' expects 'hostnames_for_reboot' var
+ to be set as a list of hostnames which should be rebooted."
+ when: "(hostnames_for_reboot is not defined) or hostnames_for_reboot | length < 1"
+
+ - name: Run 'yum update' command
+ command: "yum update -y"
+ delegate_to: "{{ item }}"
+ with_items: "{{ hostnames_for_reboot }}"
+
+ - name: Reboot machine to apply all major changes to the system, if any exist
+ shell: "sleep 3 ; /sbin/shutdown -r now 'Reboot triggered by Ansible'"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ delegate_to: "{{ item }}"
+ with_items: "{{ hostnames_for_reboot }}"
+
+ - name: Wait for machine to go down
+ wait_for:
+ host: "{{ item }}"
+ port: 22
+ delay: 0
+ timeout: 180
+ connect_timeout: 5
+ state: stopped
+ with_items: "{{ hostnames_for_reboot }}"
+
+ - name: Wait for machine to go up
+ wait_for:
+ host: "{{ item }}"
+ port: 22
+ delay: 0
+ timeout: 180
+ connect_timeout: 5
+ state: started
+ with_items: "{{ hostnames_for_reboot }}"
+ when: "disable_yum_update_and_reboot is undefined or not (disable_yum_update_and_reboot | bool)"
diff --git a/deployment/playbooks/scaleup.yaml b/deployment/playbooks/scaleup.yaml
new file mode 100644
index 00000000..4a21eadc
--- /dev/null
+++ b/deployment/playbooks/scaleup.yaml
@@ -0,0 +1,35 @@
+---
+- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) |
+ ternary(
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/' +
+ 'byo/openshift-node/scaleup.yml',
+ 'noop.yaml')
+ }}"
+
+- include: "{{ (openshift_vers in ['v3_9']) |
+ ternary(
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/' +
+ 'openshift-node/scaleup.yml',
+ 'noop.yaml')
+ }}"
+
+# NOTE(vponomar): the following playbooks are the parts we need from the
+# 'playbooks/openshift-node/scaleup.yml' playbook in OCP3.10 and OCP3.11.
+# This may change for versions after OCP3.11.
+- include: "{{ (openshift_vers not in ['v3_6', 'v3_7', 'v3_9']) |
+ ternary(
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/' +
+ 'openshift-node/private/bootstrap.yml',
+ 'noop.yaml')
+ }}"
+
+- include: "{{ (openshift_vers not in ['v3_6', 'v3_7', 'v3_9']) |
+ ternary(
+ lookup('env', 'VIRTUAL_ENV') +
+ '/usr/share/ansible/openshift-ansible/playbooks/' +
+ 'openshift-node/private/join.yml',
+ 'noop.yaml')
+ }}"
diff --git a/deployment/playbooks/setup.yaml b/deployment/playbooks/setup.yaml
new file mode 100644
index 00000000..2166c2fc
--- /dev/null
+++ b/deployment/playbooks/setup.yaml
@@ -0,0 +1,27 @@
+---
+- hosts: localhost
+ user: root
+ become: false
+ vars_files:
+ - vars/main.yaml
+ tasks:
+ - name: "Create resource pool on vCenter"
+ vmware_resource_pool:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ vcenter_datacenter }}"
+ cluster: "{{ vcenter_cluster}}"
+ resource_pool: "{{ vcenter_resource_pool }}"
+ state: "present"
+ validate_certs: False
+ - name: "Create folder structure on vCenter"
+ vmware_folder:
+ hostname: "{{ vcenter_host }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ vcenter_datacenter }}"
+ cluster: "{{ vcenter_cluster}}"
+ folder: "{{ vcenter_folder }}"
+ state: "present"
+ validate_certs: False
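Both tasks use the bundled 'vmware_resource_pool' and 'vmware_folder' library modules and read their connection details from 'vars/main.yaml'. For illustration, the vCenter-related variables this play expects could be supplied as follows; every value below is a placeholder, not a default shipped with the patch:

vcenter_host: "vcenter.example.com"
vcenter_username: "administrator@vsphere.local"
vcenter_password: "changeme"
vcenter_datacenter: "dc1"
vcenter_cluster: "cluster1"
vcenter_resource_pool: "ocp"
vcenter_folder: "ocp"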
diff --git a/deployment/playbooks/vars/main.yaml b/deployment/playbooks/vars/main.yaml
new file mode 100644
index 00000000..b646ec89
--- /dev/null
+++ b/deployment/playbooks/vars/main.yaml
@@ -0,0 +1,31 @@
+---
+# OpenShift variables
+openshift_master_cluster_hostname: "{{ lb_host }}"
+openshift_master_cluster_public_hostname: "{{ lb_host }}"
+console_port: 8443
+openshift_vers: "{{ openshift_vers | default('v3_6')}}"
+openshift_major_version: "{{ openshift_vers.split('_')[-1] }}"
+openshift_ansible_branch: release-3.{{ openshift_major_version }}
+openshift_required_repos:
+- rhel-7-server-rpms
+- rhel-7-server-extras-rpms
+- rhel-7-server-ose-3.{{ openshift_major_version }}-rpms
+- rhel-7-fast-datapath-rpms
+# 'openshift_node_groups' is required for OCP3.10
+openshift_node_groups:
+- name: node-config-master
+ labels:
+ - 'node-role.kubernetes.io/master=true'
+ - 'role=master'
+ edits: []
+- name: node-config-compute
+ labels:
+ - 'node-role.kubernetes.io/compute=true'
+ - 'node-role.kubernetes.io/infra=true'
+ - 'role=compute'
+ edits: []
+- name: node-config-storage
+ labels:
+ - 'node-role.kubernetes.io/storage=true'
+ - 'role=storage'
+ edits: []
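The 'openshift_node_groups' list follows the openshift-ansible node-group schema (name, labels, edits). As a purely hypothetical illustration of extending it, an additional group could be appended like this:

- name: node-config-infra-only         # hypothetical extra group, not part of the patch
  labels:
  - 'node-role.kubernetes.io/infra=true'
  - 'role=infra'
  edits: []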