author     Valerii Ponomarov <vponomar@redhat.com>  2019-12-21 00:09:11 +0530
committer  Valerii Ponomarov <vponomar@redhat.com>  2019-12-21 00:15:18 +0530
commit     d28b637d972aa600ed15ef437e4cb59c53e0ca5e (patch)
tree       5cefd011a1d3b6d2ebc395227494ed64dc1da286
parent     678dd026c8961a713dec15a153ef9da64c6b171b (diff)
Delete 'deployment' dir as obsolete and not supported
The 'deployment' directory contains an obsolete and unsupported approach for
deploying OpenShift and GlusterFS storage. A separate deployment approach has
already been in use for a significant amount of time, so delete the local
'deployment' directory, as it makes no sense to keep it. Moreover, keeping it
may confuse people who try it out and run into unpredictable errors.

Change-Id: Ibf353500bab59853f597304cb9c1990102c000ef
-rw-r--r--  deployment/LICENSE.md  201
-rw-r--r--  deployment/README.rst  161
-rwxr-xr-x  deployment/add-node.py  668
-rw-r--r--  deployment/ansible.cfg  15
-rw-r--r--  deployment/cns-automation-config.yaml  53
-rwxr-xr-x  deployment/inventory/vsphere/vms/vmware_inventory.ini  71
-rwxr-xr-x  deployment/inventory/vsphere/vms/vmware_inventory.py  567
-rw-r--r--  deployment/ocp-on-vmware.ini  156
-rwxr-xr-x  deployment/ocp-on-vmware.py  477
-rw-r--r--  deployment/playbooks/add-node-prerequisite.yaml  16
-rw-r--r--  deployment/playbooks/add-node.yaml  142
-rw-r--r--  deployment/playbooks/clean.yaml  66
-rw-r--r--  deployment/playbooks/cleanup-cns.yaml  38
-rw-r--r--  deployment/playbooks/cleanup-crs.yaml  38
-rw-r--r--  deployment/playbooks/cns-node-setup.yaml  131
-rw-r--r--  deployment/playbooks/cns-setup.yaml  164
-rw-r--r--  deployment/playbooks/cns-storage.yaml  15
-rw-r--r--  deployment/playbooks/crs-node-setup.yaml  123
-rw-r--r--  deployment/playbooks/crs-setup.yaml  209
-rw-r--r--  deployment/playbooks/crs-storage.yaml  12
-rw-r--r--  deployment/playbooks/gather_logs.yaml  883
-rw-r--r--  deployment/playbooks/generate-tests-config.yaml  140
-rw-r--r--  deployment/playbooks/get_ocp_info.yaml  233
l---------  deployment/playbooks/library/rpm_q.py  1
-rw-r--r--  deployment/playbooks/library/vmware_folder.py  268
-rw-r--r--  deployment/playbooks/library/vmware_resource_pool.py  361
-rw-r--r--  deployment/playbooks/node-setup.yaml  92
-rw-r--r--  deployment/playbooks/noop.yaml  7
-rw-r--r--  deployment/playbooks/ocp-configure.yaml  16
-rw-r--r--  deployment/playbooks/ocp-end-to-end.yaml  15
-rw-r--r--  deployment/playbooks/ocp-install.yaml  365
-rw-r--r--  deployment/playbooks/prerequisite.yaml  26
-rw-r--r--  deployment/playbooks/prod-ose-cns.yaml  11
-rw-r--r--  deployment/playbooks/prod-ose-crs.yaml  11
-rw-r--r--  deployment/playbooks/prod.yaml  19
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml  13
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2  11
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml  3
-rw-r--r--  deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml  8
-rw-r--r--  deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml  142
-rw-r--r--  deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml  143
-rw-r--r--  deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml  157
-rw-r--r--  deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml  66
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml  7
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml  39
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2  4
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2  7
-rw-r--r--  deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml  15
-rw-r--r--  deployment/playbooks/roles/etcd-storage/tasks/main.yaml  24
-rw-r--r--  deployment/playbooks/roles/gluster-ports/defaults/main.yaml  3
-rw-r--r--  deployment/playbooks/roles/gluster-ports/tasks/main.yaml  34
-rw-r--r--  deployment/playbooks/roles/instance-groups/tasks/main.yaml  152
-rw-r--r--  deployment/playbooks/roles/master-prerequisites/tasks/main.yaml  6
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml  5
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml  27
-rw-r--r--  deployment/playbooks/roles/package-repos/tasks/main.yaml  23
-rw-r--r--  deployment/playbooks/roles/prerequisites/defaults/main.yaml  6
l---------  deployment/playbooks/roles/prerequisites/library/openshift_facts.py  1
-rw-r--r--  deployment/playbooks/roles/prerequisites/library/rpm_q.py  73
-rw-r--r--  deployment/playbooks/roles/prerequisites/tasks/main.yaml  84
-rw-r--r--  deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml  14
-rw-r--r--  deployment/playbooks/roles/rhsm/defaults/main.yaml  5
-rw-r--r--  deployment/playbooks/roles/rhsm/tasks/main.yaml  49
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml  83
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml  29
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/tasks/main.yaml  22
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2  8
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml  6
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml  89
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2  19
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml  3
-rw-r--r--  deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml  48
-rw-r--r--  deployment/playbooks/scaleup.yaml  35
-rw-r--r--  deployment/playbooks/setup.yaml  27
-rw-r--r--  deployment/playbooks/vars/main.yaml  76
-rwxr-xr-x  deployment/scripts/install_openshift_ansible.sh  35
-rwxr-xr-x  deployment/scripts/install_yedit_for_ansible.sh  17
-rw-r--r--  deployment/tox.ini  122
78 files changed, 0 insertions, 7511 deletions
diff --git a/deployment/LICENSE.md b/deployment/LICENSE.md
deleted file mode 100644
index 6a3a047f..00000000
--- a/deployment/LICENSE.md
+++ /dev/null
@@ -1,201 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2014 Red Hat, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/deployment/README.rst b/deployment/README.rst
deleted file mode 100644
index f2e70646..00000000
--- a/deployment/README.rst
+++ /dev/null
@@ -1,161 +0,0 @@
-=====================================
-Deployment of OpenShift 3.x On VMWare
-=====================================
-
------------
-What is it?
------------
-
-It is an end-to-end deployment tool for OpenShift 3.x and
-OpenShift Container Storage (OCS) on top of the VMWare cloud platform.
-It is a set of `Ansible <https://github.com/ansible/ansible>`__ playbooks
-that wraps the
-`openshift-ansible <https://github.com/openshift/openshift-ansible>`__ library.
-
-This wrapper adds things which are needed but not provided by the
-library mentioned above, such as the following:
-
-- Node provisioning.
-
-- Node preparation.
-
-- Gathering of info about the newly provisioned nodes and the installed
-  OpenShift (including its storage part), provided as an output config file
-  for automated test cases.
-
-- Post-config actions that prepare the cluster for automated testing.
-
---------------------------
-What can it do? It can ...
---------------------------
-
-- ... deploy OpenShift 3.6, 3.7, 3.9, 3.10 and 3.11 on top of
- the VMWare cloud platform.
-
-- ... deploy containerized and standalone GlusterFS clusters.
-  The GlusterFS versions are configurable and depend on the repositories used.
-
-- ... use downstream repositories for package installation.
-
-- ... use any docker registry for getting container images. See the example
-  below.
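-
-As an illustration of the last point, below is a sketch of the
-registry-related options from “ocp-on-vmware.ini”, as read by “add-node.py”
-(the registry host and image tag are made-up placeholders):
-
-.. code-block:: ini
-
- [vmware]
- docker_registry_url=registry.example.com/openshift3/ose-${component}:v3.11
- docker_additional_registries=registry.example.com
- docker_insecure_registries=registry.example.com
- docker_image_tag=v3.11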
-
--------------------
-VMWare requirements
--------------------
-
-- DHCP configured for all the new VMs.
-
-- New VMs get deployed from a VMWare 'template', so a proper VMWare template
-  must be created beforehand. It can be a bare or an updated RHEL7 image.
-
-- One OpenShift cluster is expected to consist of at least 5 VMs, so
-  there should be enough resources for it.
-
------
-Usage
------
-
-1) Create VMWare template VM using RHEL7
-----------------------------------------
-
-- Add SSH public key(s) for the password-less connection required by Ansible:
-
-.. code-block:: console
-
- $ ssh-copy-id username@ip_address_of_the_vm_which_will_be_used_as_template
-
-- Make sure that the default user SSH key pair on the “Ansible” machine is
-  the same one whose public key was copied above
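-
-If no such key pair exists on the “Ansible” machine yet, one can be generated
-first (a default passphrase-less RSA key pair is assumed here):
-
-.. code-block:: console
-
- $ ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa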
-
-2) Install dependencies
------------------------
-
-Install the following dependencies on the machine where you are going to run
-the deployment.
-
-- Install “pip”, “git” and “libselinux-python” if not installed yet:
-
-.. code-block:: console
-
- $ yum install python-pip git libselinux-python
-
-- Install “tox” if not installed yet:
-
-.. code-block:: console
-
- $ pip install git+git://github.com/tox-dev/tox.git@2.9.1#egg=tox
-
-Considering the fact that it is an 'end-to-end' deployment tool, the
-deployment always runs on a machine separate from the machines of the
-deployed cluster.
-
-3) Configure the tool before starting deployment
-------------------------------------------------
-
-Open the “ocp-on-vmware.ini” file with any text editor and provide correct
-values for all the config options. All of the options have inline
-descriptions in the same file; an abbreviated example follows.
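-
-For illustration, a minimal sketch of the “[vmware]” section could look like
-the following (the option names are the ones read by the deployment scripts;
-all values are environment-specific placeholders):
-
-.. code-block:: ini
-
- [vmware]
- vcenter_host=vcenter.example.com
- vcenter_username=administrator@vsphere.local
- vcenter_password=********
- vcenter_template_name=rhel7-template
- vcenter_datacenter=dc1
- dns_zone=example.com
- container_storage=cns
- compute_nodes=2
- storage_nodes=3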
-
-4) Deploy OpenShift
--------------------
-
-OpenShift can be deployed using the following command:
-
-.. code-block:: console
-
- $ tox -e ocp3.X -- python ocp-on-vmware.py --no_confirm --verbose
-
-Replace 'X' in the 'ocp3.X' part of the command with the proper minor
-version of OpenShift. The following values are allowed: 'ocp3.6', 'ocp3.7',
-'ocp3.9', 'ocp3.10' and 'ocp3.11'. The same applies to the commands below.
-
-
-5) Install OpenShift Container Storage
---------------------------------------
-
-Use the following command for a brownfield installation of
-OpenShift Container Storage on top of the OpenShift cluster
-deployed in the previous step:
-
-.. code-block:: console
-
- $ tox -e ocp3.X -- python add-node.py \
- --verbose --node_type=storage --node_number=3 --no_confirm
-
-Note that if “--node_number=Y” is not provided, then 3 nodes are installed
-by default. The type of storage (CNS or CRS) is defined in the
-“ocp-on-vmware.ini” file, where "CNS" means a containerized GlusterFS
-installation and "CRS" a standalone one; see the example below.
-
-
-6) Clean up the deployed cluster
---------------------------------
-
-If the deployed cluster is not needed anymore, it can be cleaned up using the
-following command:
-
-.. code-block:: console
-
- $ tox -e ocp3.X -- python ocp-on-vmware.py --clean
-
-
-------------------------
-History of the code base
-------------------------
-
-Originally, the code base was forked from the
-`openshift-ansible-contrib <https://github.com/openshift/openshift-ansible-contrib>`__
-project.
-At that moment it supported only OpenShift 3.6 with a restricted set of
-features.
-The project was 'forked', and not 'used directly', exactly because that
-'restricted set of features' didn't satisfy the needs and environments of
-our team (the OpenShift Storage Quality Assurance team):
-we needed the VMWare cloud platform with DHCP configured for the new VMs.
-
-So, to get an end-to-end deployment tool, we forked the project and started
-to work on it actively, without time for the long review process of the
-source project's PRs (Pull Requests).
-Since then, this 'fork' has evolved a lot: it started supporting a bunch of
-OpenShift versions in a single code base, in addition to gaining other new
-features.
-Finally, this code came to the repo with the 'automated test cases' that are
-used together with this deployment tool in CI.
diff --git a/deployment/add-node.py b/deployment/add-node.py
deleted file mode 100755
index 1f584c11..00000000
--- a/deployment/add-node.py
+++ /dev/null
@@ -1,668 +0,0 @@
-#!/usr/bin/env python
-# vim: sw=4 ts=4 et
-
-import argparse
-import click
-import fileinput
-import os
-import re
-import requests
-import six
-from six.moves import configparser
-import sys
-import yaml
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-
-class VMWareAddNode(object):
- __name__ = 'VMWareAddNode'
-
- openshift_vers = None
- cluster_id = None
- vcenter_host = None
- vcenter_username = None
- vcenter_password = None
- vcenter_template_name = None
- vcenter_folder = None
- vcenter_datastore = None
- vcenter_datacenter = None
- vcenter_cluster = None
- vcenter_resource_pool = None
- rhel_subscription_server = None
- openshift_sdn = None
- compute_nodes = None
- storage_nodes = None
- cns_automation_config_file_path = None
- ocp_hostname_prefix = None
- deployment_type = None
- console_port = 8443
- rhel_subscription_user = None
- rhel_subscription_pass = None
- rhel_subscription_pool = None
- dns_zone = None
- app_dns_prefix = None
- admin_key = None
- user_key = None
- wildcard_zone = None
- inventory_file = 'add-node.json'
- node_type = None
- node_number = None
- openshift_disable_check = None
- container_storage = None
- container_storage_disks = None
- container_storage_block_hosting_volume_size = None
- container_storage_disk_type = None
- additional_disks_to_storage_nodes = None
- container_storage_glusterfs_timeout = None
- heketi_admin_key = None
- heketi_user_key = None
- tag = None
- verbose = 0
- docker_registry_url = None
- docker_additional_registries = None
- docker_insecure_registries = None
- docker_image_tag = None
- ose_puddle_repo = None
- gluster_puddle_repo = None
- cns_glusterfs_image = None
- cns_glusterfs_version = None
- cns_glusterfs_block_image = None
- cns_glusterfs_block_version = None
- cns_glusterfs_heketi_image = None
- cns_glusterfs_heketi_version = None
- disable_yum_update_and_reboot = None
- openshift_use_crio = None
-
- def __init__(self):
- self.parse_cli_args()
- self.read_ini_settings()
- self.create_inventory_file()
- self.launch_refarch_env()
-
- def update_ini_file(self):
- """Update INI file with added number of nodes."""
- scriptbasename = "ocp-on-vmware"
- defaults = {'vmware': {
- 'ini_path': os.path.join(
- os.path.dirname(__file__), '%s.ini' % scriptbasename),
- 'storage_nodes': '3',
- 'compute_nodes': '2',
- }}
- # where is the config?
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
-
- vmware_ini_path = os.environ.get(
- 'VMWARE_INI_PATH', defaults['vmware']['ini_path'])
- vmware_ini_path = os.path.expanduser(
- os.path.expandvars(vmware_ini_path))
- config.read(vmware_ini_path)
-
- if 'compute' in self.node_type:
- self.compute_nodes = (
- int(self.compute_nodes) + int(self.node_number))
- config.set('vmware', 'compute_nodes', str(self.compute_nodes))
- print "Updating %s file with %s compute_nodes" % (
- vmware_ini_path, self.compute_nodes)
- if 'storage' in self.node_type:
- self.storage_nodes = int(self.storage_nodes) or 3
- config.set('vmware', 'storage_nodes', str(self.storage_nodes))
- print "Updating %s file with %s storage_nodes" % (
- vmware_ini_path, self.storage_nodes)
-
- for line in fileinput.input(vmware_ini_path, inplace=True):
- if line.startswith("compute_nodes"):
- print "compute_nodes=" + str(self.compute_nodes)
- elif line.startswith("storage_nodes"):
- print "storage_nodes=" + str(self.storage_nodes)
- else:
- print line,
-
- def parse_cli_args(self):
- """Command line argument processing."""
-
-        tag_help = """Skip to various parts of the install; valid tags include:
- - vms (create vms for adding nodes to cluster or CNS/CRS)
- - node-setup (install the proper packages on the CNS/CRS nodes)
- - clean (remove vms and unregister them from RHN
-          also remove storage classes or secrets)"""
- parser = argparse.ArgumentParser(
- description='Add new nodes to an existing OCP deployment',
- formatter_class=argparse.RawTextHelpFormatter)
- parser.add_argument(
- '--node_type', action='store', default='compute',
- help='Specify the node label: compute, storage')
- parser.add_argument(
- '--node_number', action='store', default='3',
- help='Specify the number of nodes to add.')
- parser.add_argument(
- '--create_inventory', action='store_true',
- help=('Deprecated and not used option. '
- 'Everything that is needed gets autocreated.'))
- parser.add_argument(
- '--no_confirm', action='store_true',
- help='Skip confirmation prompt')
- parser.add_argument('--tag', default=None, help=tag_help)
- parser.add_argument(
- '--verbose', default=None, action='store_true',
- help='Verbosely display commands')
- self.args = parser.parse_args()
- self.verbose = self.args.verbose
-
- def _is_rpm_and_image_tag_compatible(self):
- if not (self.docker_image_tag and self.ose_puddle_repo):
- return True
- url = self.ose_puddle_repo
- if url[-1] == '/':
- url += 'Packages/'
- else:
- url += '/Packages/'
- resp = requests.get(url)
- if resp.ok:
- v = self.docker_image_tag.split('v')[-1].strip().split('-')[0]
- return (('atomic-openshift-%s' % v) in resp.text)
- raise Exception(
- "Failed to pull list of packages from '%s' url." % url)
-
- def read_ini_settings(self):
- """Read ini file settings."""
-
- scriptbasename = "ocp-on-vmware"
- defaults = {'vmware': {
- 'ini_path': os.path.join(
- os.path.dirname(__file__), '%s.ini' % scriptbasename),
- 'console_port': '8443',
- 'container_storage': 'none',
- 'container_storage_disks': '100,600',
- 'container_storage_block_hosting_volume_size': '99',
- 'additional_disks_to_storage_nodes': '100',
- 'container_storage_disk_type': 'eagerZeroedThick',
- 'container_storage_glusterfs_timeout': '',
- 'heketi_admin_key': '',
- 'heketi_user_key': '',
- 'docker_registry_url': '',
- 'docker_additional_registries': '',
- 'docker_insecure_registries': '',
- 'docker_image_tag': '',
- 'ose_puddle_repo': '',
- 'gluster_puddle_repo': '',
- 'cns_glusterfs_image': 'rhgs3/rhgs-server-rhel7',
- 'cns_glusterfs_version': 'latest',
- 'cns_glusterfs_block_image': 'rhgs3/rhgs-gluster-block-prov-rhel7',
- 'cns_glusterfs_block_version': 'latest',
- 'cns_glusterfs_heketi_image': 'rhgs3/rhgs-volmanager-rhel7',
- 'cns_glusterfs_heketi_version': 'latest',
- 'deployment_type': 'openshift-enterprise',
- 'openshift_vers': 'v3_11',
- 'vcenter_username': 'administrator@vsphere.local',
- 'vcenter_template_name': 'not-defined',
- 'vcenter_folder': 'ocp',
- 'vcenter_resource_pool': '/Resources/OCP3',
- 'app_dns_prefix': 'apps',
- 'vm_network': 'VM Network',
- 'rhel_subscription_pool': 'Employee SKU',
- 'openshift_sdn': 'redhat/openshift-ovs-subnet',
- 'compute_nodes': '2',
- 'storage_nodes': '3',
- 'cns_automation_config_file_path': '',
- 'ocp_hostname_prefix': 'openshift-on-vmware',
- 'node_type': self.args.node_type,
- 'node_number': self.args.node_number,
- 'tag': self.args.tag,
- 'openshift_disable_check': (
- 'docker_storage,docker_image_availability,disk_availability'),
- 'disable_yum_update_and_reboot': 'no',
- 'openshift_use_crio': 'false',
- }}
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
-
- # where is the config?
- vmware_ini_path = os.environ.get(
- 'VMWARE_INI_PATH', defaults['vmware']['ini_path'])
- vmware_ini_path = os.path.expanduser(
- os.path.expandvars(vmware_ini_path))
- config.read(vmware_ini_path)
-
- # apply defaults
- for k, v in defaults['vmware'].items():
- if not config.has_option('vmware', k):
- config.set('vmware', k, str(v))
-
- self.console_port = config.get('vmware', 'console_port')
- self.cluster_id = config.get('vmware', 'cluster_id')
- self.container_storage = config.get('vmware', 'container_storage')
- self.container_storage_disks = config.get(
- 'vmware', 'container_storage_disks')
- self.container_storage_block_hosting_volume_size = config.get(
- 'vmware',
- 'container_storage_block_hosting_volume_size').strip() or 99
- self.container_storage_disk_type = config.get(
- 'vmware', 'container_storage_disk_type')
- self.additional_disks_to_storage_nodes = config.get(
- 'vmware', 'additional_disks_to_storage_nodes')
- self.container_storage_glusterfs_timeout = config.get(
- 'vmware', 'container_storage_glusterfs_timeout')
- self.heketi_admin_key = config.get('vmware', 'heketi_admin_key')
- self.heketi_user_key = config.get('vmware', 'heketi_user_key')
- self.docker_registry_url = config.get('vmware', 'docker_registry_url')
- self.docker_additional_registries = config.get(
- 'vmware', 'docker_additional_registries')
- self.docker_insecure_registries = config.get(
- 'vmware', 'docker_insecure_registries')
- self.docker_image_tag = (
- config.get('vmware', 'docker_image_tag') or '').strip()
- self.ose_puddle_repo = config.get('vmware', 'ose_puddle_repo')
- self.gluster_puddle_repo = config.get('vmware', 'gluster_puddle_repo')
- self.cns_glusterfs_image = (
- config.get('vmware', 'cns_glusterfs_image')).strip()
- self.cns_glusterfs_version = (
- config.get('vmware', 'cns_glusterfs_version')).strip()
- self.cns_glusterfs_block_image = (
- config.get('vmware', 'cns_glusterfs_block_image')).strip()
- self.cns_glusterfs_block_version = (
- config.get('vmware', 'cns_glusterfs_block_version')).strip()
- self.cns_glusterfs_heketi_image = (
- config.get('vmware', 'cns_glusterfs_heketi_image')).strip()
- self.cns_glusterfs_heketi_version = (
- config.get('vmware', 'cns_glusterfs_heketi_version')).strip()
- self.deployment_type = config.get('vmware', 'deployment_type')
- self.openshift_vers = config.get('vmware', 'openshift_vers')
- self.vcenter_host = config.get('vmware', 'vcenter_host')
- self.vcenter_username = config.get('vmware', 'vcenter_username')
- self.vcenter_password = config.get('vmware', 'vcenter_password')
- self.vcenter_template_name = config.get(
- 'vmware', 'vcenter_template_name')
- self.vcenter_folder = config.get('vmware', 'vcenter_folder')
- self.vcenter_datastore = config.get('vmware', 'vcenter_datastore')
- self.vcenter_datacenter = config.get('vmware', 'vcenter_datacenter')
- self.vcenter_cluster = config.get('vmware', 'vcenter_cluster')
- self.vcenter_resource_pool = config.get(
- 'vmware', 'vcenter_resource_pool')
- self.dns_zone = config.get('vmware', 'dns_zone')
- self.app_dns_prefix = config.get('vmware', 'app_dns_prefix')
- self.vm_network = config.get('vmware', 'vm_network')
- self.rhel_subscription_user = config.get(
- 'vmware', 'rhel_subscription_user')
- self.rhel_subscription_pass = config.get(
- 'vmware', 'rhel_subscription_pass')
- self.rhel_subscription_server = config.get(
- 'vmware', 'rhel_subscription_server')
- self.rhel_subscription_pool = config.get(
- 'vmware', 'rhel_subscription_pool')
- self.openshift_sdn = config.get('vmware', 'openshift_sdn')
- self.compute_nodes = int(config.get('vmware', 'compute_nodes')) or 2
- self.storage_nodes = int(config.get('vmware', 'storage_nodes')) or 3
- self.cns_automation_config_file_path = config.get(
- 'vmware', 'cns_automation_config_file_path')
- self.ocp_hostname_prefix = config.get(
- 'vmware', 'ocp_hostname_prefix') or 'ansible-on-vmware'
- self.lb_host = '%s-master-0' % self.ocp_hostname_prefix
- self.openshift_disable_check = config.get(
- 'vmware', 'openshift_disable_check').strip() or (
- 'docker_storage,docker_image_availability,disk_availability')
- self.disable_yum_update_and_reboot = config.get(
- 'vmware', 'disable_yum_update_and_reboot').strip() or 'no'
- self.node_type = config.get('vmware', 'node_type')
- self.node_number = config.get('vmware', 'node_number')
- self.tag = config.get('vmware', 'tag')
- self.openshift_use_crio = (
- config.get('vmware', 'openshift_use_crio') or '').strip()
- err_count = 0
-
- if 'storage' in self.node_type:
- if self.node_number < 3:
- err_count += 1
- print ("Node number for CNS and CRS should be 3 or more.")
- if self.container_storage is None:
- err_count += 1
- print ("Please specify crs or cns in container_storage in "
- "the %s." % vmware_ini_path)
- elif self.container_storage in ('cns', 'crs'):
- self.inventory_file = (
- "%s-inventory.json" % self.container_storage)
- required_vars = {
- 'cluster_id': self.cluster_id,
- 'dns_zone': self.dns_zone,
- 'vcenter_host': self.vcenter_host,
- 'vcenter_password': self.vcenter_password,
- 'vcenter_datacenter': self.vcenter_datacenter,
- }
- for k, v in required_vars.items():
- if v == '':
- err_count += 1
- print "Missing %s " % k
- if not (self.container_storage_disks
- and re.search(
- r'^[0-9]*(,[0-9]*)*$', self.container_storage_disks)):
- err_count += 1
- print ("'container_storage_disks' has improper value - "
- "'%s'. Only integers separated with comma are allowed." % (
- self.container_storage_disks))
- if self.container_storage_block_hosting_volume_size:
- try:
- self.container_storage_block_hosting_volume_size = int(
- self.container_storage_block_hosting_volume_size)
- except ValueError:
- err_count += 1
- print ("'container_storage_block_hosting_volume_size' can be "
- "either empty or integer. Provided value is '%s'" % (
- self.container_storage_block_hosting_volume_size))
- if (self.additional_disks_to_storage_nodes and not re.search(
- r'^[0-9]*(,[0-9]*)*$',
- self.additional_disks_to_storage_nodes)):
- err_count += 1
- print ("'additional_disks_to_storage_nodes' has improper "
- "value - '%s'. Only integers separated with comma "
- "are allowed." % self.additional_disks_to_storage_nodes)
- if self.container_storage_glusterfs_timeout:
- try:
- self.container_storage_glusterfs_timeout = int(
- self.container_storage_glusterfs_timeout)
- except ValueError:
- err_count += 1
- print ("'container_storage_glusterfs_timeout' can be "
- "either empty or integer. Provided value is '%s'" % (
- self.container_storage_glusterfs_timeout))
- if (self.cns_automation_config_file_path
- and not os.path.exists(
- os.path.abspath(self.cns_automation_config_file_path))):
- err_count += 1
- print ("Wrong value for 'cns_automation_config_file_path' "
- "config option. It is expected to be either a relative "
- "or an absolute file path.")
- else:
- self.cns_automation_config_file_path = os.path.abspath(
- self.cns_automation_config_file_path)
- if self.docker_image_tag and self.docker_registry_url:
- vers_from_reg = self.docker_registry_url.split(':')[-1].strip()
- if not vers_from_reg == self.docker_image_tag:
- err_count += 1
- print ("If 'docker_image_tag' and 'docker_registry_url' are "
- "specified, then their image tags should match. "
- "docker_image_tag='%s', docker_registry_url='%s'" % (
- self.docker_image_tag, self.docker_registry_url))
- if not self._is_rpm_and_image_tag_compatible():
- err_count += 1
- print ("OCP RPM versions and docker image tag do not match. "
- "Need either to change 'ose_puddle_repo' or "
- "'docker_image_tag' config options.")
- for opt_name in ('cns_glusterfs_image', 'cns_glusterfs_block_image',
- 'cns_glusterfs_heketi_image'):
- if len(getattr(self, opt_name).split(':')) > 1:
- err_count += 1
- print ("'%s' option is expected to contain "
- "only image name." % opt_name)
- allowed_disable_checks = (
- 'disk_availability',
- 'docker_image_availability',
- 'docker_storage',
- 'memory_availability',
- 'package_availability',
- 'package_version',
- )
- self.openshift_disable_check_data = [
- el.strip()
- for el in self.openshift_disable_check.strip().split(',')
- if el.strip()
- ]
- if not all([(s in allowed_disable_checks)
- for s in self.openshift_disable_check_data]):
- err_count += 1
- print ("'openshift_disable_check' is allowed to have only "
- "following values separated with comma: %s.\n "
- "Got following value: %s" % (','.join(
- allowed_disable_checks), self.openshift_disable_check))
-
- if err_count > 0:
- print "Please fill out the missing variables in %s " % (
- vmware_ini_path)
- exit(1)
- self.wildcard_zone = "%s.%s" % (self.app_dns_prefix, self.dns_zone)
- self.support_nodes = 0
-
- print 'Configured inventory values:'
- for each_section in config.sections():
- for (key, val) in config.items(each_section):
- if 'pass' in key:
- print '\t %s: ******' % key
- else:
- print '\t %s: %s' % (key, val)
- print '\n'
-
- def create_inventory_file(self):
- if not self.args.no_confirm:
- if not click.confirm(
- 'Continue creating the inventory file with these values?'):
- sys.exit(0)
-
- d = {'host_inventory': {}}
- for i in range(0, int(self.node_number)):
-            # Build a guest name for each requested node, numbered by index
- if self.node_type == 'compute':
- guest_name = '%s-%s' % (self.node_type, i)
- guest_type = 'compute'
- elif (self.node_type == 'storage'
- and self.container_storage == 'crs'):
- guest_name = '%s-%s' % (self.container_storage, i)
- guest_type = self.container_storage
- elif (self.node_type == 'storage'
- and self.container_storage == 'cns'):
- guest_name = '%s-%s' % (self.container_storage, i)
- guest_type = self.container_storage
- else:
- raise Exception(
- "Unexpected combination of 'node_type' (%s) and "
- "'container_storage' (%s)." % (
- self.node_type, self.container_storage))
- if self.ocp_hostname_prefix:
- guest_name = "%s-%s" % (self.ocp_hostname_prefix, guest_name)
- d['host_inventory'][guest_name] = {
- 'guestname': guest_name,
- 'guesttype': guest_type,
- 'tag': str(self.cluster_id) + '-' + self.node_type,
- }
-
- with open(self.inventory_file, 'w') as outfile:
- json.dump(d, outfile, indent=4, sort_keys=True)
- print 'Inventory file created: %s' % self.inventory_file
-
- def launch_refarch_env(self):
- with open(self.inventory_file, 'r') as f:
- print yaml.safe_dump(json.load(f), default_flow_style=False)
-
- if not self.args.no_confirm:
- if not click.confirm('Continue adding nodes with these values?'):
- sys.exit(0)
-
- if (self.container_storage in ('cns', 'crs')
- and 'storage' in self.node_type):
- if 'None' in self.tag:
- # do the full install and config minus the cleanup
- self.tag = 'vms,node-setup'
- playbooks = ['playbooks/%s-storage.yaml' % self.container_storage]
- else:
- if 'None' in self.tag:
- # do the full install and config minus the cleanup
- self.tag = 'all'
- playbooks = ['playbooks/add-node.yaml']
-
- playbook_vars_dict = {
- 'add_node': 'yes',
- 'vcenter_host': self.vcenter_host,
- 'vcenter_username': self.vcenter_username,
- 'vcenter_password': self.vcenter_password,
- 'vcenter_template_name': self.vcenter_template_name,
- 'vcenter_folder': self.vcenter_folder,
- 'vcenter_datastore': self.vcenter_datastore,
- 'vcenter_cluster': self.vcenter_cluster,
- 'vcenter_datacenter': self.vcenter_datacenter,
- 'vcenter_resource_pool': self.vcenter_resource_pool,
- 'dns_zone': self.dns_zone,
- 'wildcard_zone': self.wildcard_zone,
- 'app_dns_prefix': self.app_dns_prefix,
- 'vm_network': self.vm_network,
- 'cns_automation_config_file_path': (
- self.cns_automation_config_file_path),
- 'console_port': self.console_port,
- 'cluster_id': self.cluster_id,
- 'container_storage': self.container_storage,
- 'container_storage_disks': self.container_storage_disks,
- 'container_storage_disk_type': self.container_storage_disk_type,
- 'additional_disks_to_storage_nodes': (
- self.additional_disks_to_storage_nodes),
- 'dp_tool_heketi_admin_key': self.heketi_admin_key,
- 'dp_tool_heketi_user_key': self.heketi_user_key,
- 'ose_puddle_repo': self.ose_puddle_repo,
- 'gluster_puddle_repo': self.gluster_puddle_repo,
- 'deployment_type': self.deployment_type,
- 'openshift_deployment_type': self.deployment_type,
- 'openshift_vers': self.openshift_vers,
- 'admin_key': self.admin_key,
- 'user_key': self.user_key,
- 'rhel_subscription_user': self.rhel_subscription_user,
- 'rhel_subscription_pass': self.rhel_subscription_pass,
- 'rhsm_satellite': self.rhel_subscription_server,
- 'rhsm_pool': self.rhel_subscription_pool,
- 'openshift_sdn': self.openshift_sdn,
- 'openshift_use_openshift_sdn': True,
- 'lb_host': self.lb_host,
- 'node_type': self.node_type,
- 'ocp_hostname_prefix': self.ocp_hostname_prefix,
- 'disable_yum_update_and_reboot': self.disable_yum_update_and_reboot
- }
- if self.openshift_disable_check_data:
- playbook_vars_dict["openshift_disable_check"] = (
- ','.join(self.openshift_disable_check_data))
- if self.container_storage_block_hosting_volume_size:
- playbook_vars_dict[
- 'openshift_storage_glusterfs_block_host_vol_size'] = (
- self.container_storage_block_hosting_volume_size)
- if self.container_storage_glusterfs_timeout:
- playbook_vars_dict['openshift_storage_glusterfs_timeout'] = (
- self.container_storage_glusterfs_timeout)
- if self.docker_registry_url:
- playbook_vars_dict['oreg_url'] = self.docker_registry_url
- if self.docker_additional_registries:
- playbook_vars_dict['openshift_docker_additional_registries'] = (
- self.docker_additional_registries)
- playbook_vars_dict['openshift_docker_ent_reg'] = ''
- if self.docker_insecure_registries:
- playbook_vars_dict['openshift_docker_insecure_registries'] = (
- self.docker_insecure_registries)
- if self.docker_image_tag:
- playbook_vars_dict['openshift_image_tag'] = self.docker_image_tag
-
- if self.openshift_vers == 'v3_11':
- if self.openshift_use_crio:
- playbook_vars_dict['openshift_use_crio'] = (
- self.openshift_use_crio)
- playbook_vars_dict['openshift_use_crio_only'] = (
- self.openshift_use_crio)
- playbook_vars_dict['openshift_crio_enable_docker_gc'] = (
- self.openshift_use_crio)
- else:
- playbook_vars_dict['openshift_use_crio'] = 'false'
- if self.openshift_vers in ("v3_6", "v3_7", "v3_9"):
- for key in ('image', 'version',
- 'block_image', 'block_version',
- 'heketi_image', 'heketi_version'):
- value = getattr(self, 'cns_glusterfs_%s' % key)
- if not value:
- continue
- playbook_vars_dict['openshift_storage_glusterfs_%s' % key] = (
- value)
- if self.openshift_vers in ('v3_6', 'v3_7'):
- playbook_vars_dict['docker_version'] = '1.12.6'
- elif self.openshift_vers != "v3_9":
- if self.cns_glusterfs_version:
- playbook_vars_dict['openshift_storage_glusterfs_image'] = (
- "%s:%s" % (
- self.cns_glusterfs_image or 'rhgs3/rhgs-server-rhel7',
- self.cns_glusterfs_version))
- elif self.cns_glusterfs_image:
- playbook_vars_dict['openshift_storage_glusterfs_image'] = (
- "%s:latest" % self.cns_glusterfs_image)
- if self.cns_glusterfs_block_version:
- playbook_vars_dict[
- 'openshift_storage_glusterfs_block_image'] = (
- "%s:%s" % (
- self.cns_glusterfs_block_image
- or 'rhgs3/rhgs-gluster-block-prov-rhel7',
- self.cns_glusterfs_block_version))
- elif self.cns_glusterfs_block_image:
- playbook_vars_dict[
- "openshift_storage_glusterfs_block_image"] = (
- "%s:latest" % self.cns_glusterfs_block_image)
- if self.cns_glusterfs_heketi_version:
- playbook_vars_dict[
- 'openshift_storage_glusterfs_heketi_image'] = (
- "%s:%s" % (
- self.cns_glusterfs_heketi_image
- or 'rhgs3/rhgs-volmanager-rhel7',
- self.cns_glusterfs_heketi_version))
- elif self.cns_glusterfs_heketi_image:
- playbook_vars_dict[
- "openshift_storage_glusterfs_heketi_image"] = (
- "%s:latest" % self.cns_glusterfs_heketi_image)
-
- playbook_vars_str = ' '.join('%s=%s' % (k, v)
- for (k, v) in playbook_vars_dict.items())
-
- for playbook in playbooks:
- devnull = '' if self.verbose > 0 else '> /dev/null'
-
- # refresh the inventory cache to prevent stale hosts from
-            # interfering with re-running
- command = 'inventory/vsphere/vms/vmware_inventory.py %s' % (
- devnull)
- os.system(command)
-
- # remove any cached facts to prevent stale data during a re-run
- command = 'rm -rf .ansible/cached_facts'
- os.system(command)
-
- command = (
- "ansible-playbook"
- " --extra-vars '@./%s'"
- " --tags %s"
- " -e '%s' %s" % (
- self.inventory_file, self.tag, playbook_vars_str, playbook)
- )
-
- if self.verbose > 0:
- command += " -vvvvv"
-
- click.echo('We are running: %s' % command)
- status = os.system(command)
- if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
- sys.exit(os.WEXITSTATUS(status))
-
- command = (
- "ansible-playbook "
- "-i %smaster-0, playbooks/get_ocp_info.yaml") % (
- "%s-" % self.ocp_hostname_prefix
- if self.ocp_hostname_prefix else "")
- os.system(command)
-
- print "Successful run!"
- if click.confirm('Update INI?'):
- self.update_ini_file()
- if click.confirm('Delete inventory file?'):
- print "Removing the existing %s file" % self.inventory_file
- os.remove(self.inventory_file)
- sys.exit(0)
-
-
-if __name__ == '__main__':
- VMWareAddNode()
diff --git a/deployment/ansible.cfg b/deployment/ansible.cfg
deleted file mode 100644
index 09c118b9..00000000
--- a/deployment/ansible.cfg
+++ /dev/null
@@ -1,15 +0,0 @@
-[defaults]
-forks = 50
-host_key_checking = False
-inventory = inventory/vsphere/vms/vmware_inventory.py
-gathering = smart
-roles_path = /usr/share/ansible/openshift-ansible/roles:/opt/ansible/roles:./roles:../../roles
-remote_user = root
-private_key_file=~/.ssh/id_rsa
-retry_files_enabled=False
-log_path=./ansible.log
-
-[ssh_connection]
-ssh_args = -C -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -o PreferredAuthentications=publickey -o StrictHostKeyChecking=false
-control_path = inventory
-pipelining = True
diff --git a/deployment/cns-automation-config.yaml b/deployment/cns-automation-config.yaml
deleted file mode 100644
index 8d24006d..00000000
--- a/deployment/cns-automation-config.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-ocp_servers:
- master:
- master_ip_addr:
- hostname: "master_hostname"
- client:
- client_ip_addr:
- hostname: "client_hostname"
- nodes:
- node_ip_addr:
- hostname: "node_hostname"
-
-gluster_servers:
- gluster_server_ip_address1:
- manage: "gluster_server_ip_hostname1"
- storage: gluster_server_ip_address1
- additional_devices: ['/dev/sdg']
- gluster_server_ip_address2:
- manage: "gluster_server_ip_hostname2"
- storage: gluster_server_ip_address2
- additional_devices: ['/dev/sdg']
- gluster_server_ip_address3:
- manage: "gluster_server_ip_hostname3"
- storage: gluster_server_ip_address3
- additional_devices: ['/dev/sdg']
-
-openshift:
- storage_project_name: "storage"
- heketi_config:
- heketi_dc_name: name_of_the_heketi_deployment_config
- heketi_service_name: name_of_the_heketi_service
- heketi_client_node: master_ip_addr
- heketi_server_url: "http://heketi.url"
- heketi_cli_user: "admin"
- heketi_cli_key: "secret_value_for_admin_user"
- dynamic_provisioning:
- storage_classes:
- file_storage_class:
- provisioner: "kubernetes.io/glusterfs"
- resturl: "http://heketi.url"
- restuser: "admin"
- secretnamespace: "storage"
- volumenameprefix: "autotests-file"
- block_storage_class:
- provisioner: "gluster.org/glusterblock"
- resturl: "http://heketi.url"
- restuser: "admin"
- restsecretnamespace: "storage"
- hacount: "3"
- chapauthenabled: "true"
- volumenameprefix: "autotests-block"
-
-common:
- stop_on_first_failure: False
diff --git a/deployment/inventory/vsphere/vms/vmware_inventory.ini b/deployment/inventory/vsphere/vms/vmware_inventory.ini
deleted file mode 100755
index 13a50190..00000000
--- a/deployment/inventory/vsphere/vms/vmware_inventory.ini
+++ /dev/null
@@ -1,71 +0,0 @@
-#Ansible VMware external inventory script settings
-
-[vmware]
-
-# The resolvable hostname or ip address of the vsphere
-server=
-
-# The port for the vsphere API
-#port=443
-
-# The username with access to the vsphere API
-username=administrator@vsphere.local
-
-# The password for the vsphere API
-password=
-
-# Specify the number of seconds to use the inventory cache before it is
-# considered stale. If not defined, defaults to 0 seconds.
-cache_max_age = 0
-
-
-# Specify the directory used for storing the inventory cache. If not defined,
-# caching will be disabled.
-cache_dir = ~/.cache/ansible
-
-
-# Max object level refers to the level of recursion the script will delve into
-# the objects returned from pyvmomi to find serializable facts. The default
-# level of 0 is sufficient for most tasks and will be the most performant.
-# Beware that the recursion can exceed python's limit (causing traceback),
-# cause sluggish script performance and return huge blobs of facts.
-# If you do not know what you are doing, leave this set to 1.
-#max_object_level=1
-
-
-# Lower the keynames for facts to make addressing them easier.
-#lower_var_keys=True
-
-
-# Host alias for objects in the inventory. VMWare allows duplicate VM names
-# so they can not be considered unique. Use this setting to alter the alias
-# returned for the hosts. Any attributes for the guest can be used to build
-# this alias. The default combines the config name and the config uuid and
-# expects that the ansible_host will be set by the host_pattern.
-#alias_pattern={{ config.name + '_' + config.uuid }}
-alias_pattern={{ config.name }}
-
-
-# Host pattern is the value set for ansible_host and ansible_ssh_host, which
-# needs to be a hostname or ipaddress the ansible controlhost can reach.
-#host_pattern={{ guest.ipaddress }}
-host_pattern={{ guest.hostname }}
-
-
-# Host filters are a comma separated list of jinja patterns to remove
-# non-matching hosts from the final result.
-# EXAMPLES:
-# host_filters={{ config.guestid == 'rhel7_64Guest' }}
-# host_filters={{ config.cpuhotremoveenabled != False }},{{ runtime.maxmemoryusage >= 512 }}
-# The default is only gueststate of 'running'
-host_filters={{ guest.gueststate == "running" }}, {{ config.template != 'templates' }}
-
-
-# Groupby patterns enable the user to create groups via any possible jinja
-# expression. The resulting value will be the group name and the host will be added
-# to that group. Be careful to not make expressions that simply return True/False
-# because those values will become the literal group name. The patterns can be
-# comma delimited to create as many groups as necessary
-#groupby_patterns={{ guest.guestid }},{{ 'templates' if config.template else 'guests'}},
-groupby_patterns={{ config.annotation }}
diff --git a/deployment/inventory/vsphere/vms/vmware_inventory.py b/deployment/inventory/vsphere/vms/vmware_inventory.py
deleted file mode 100755
index 0cc485e7..00000000
--- a/deployment/inventory/vsphere/vms/vmware_inventory.py
+++ /dev/null
@@ -1,567 +0,0 @@
-#!/usr/bin/env python
-
-# Requirements
-# - pyvmomi >= 6.0.0.2016.4
-
-# TODO:
-# * more jq examples
-# * optional folder hierarchy
-
-"""
-$ jq '._meta.hostvars[].config' data.json | head
-{
- "alternateguestname": "",
- "instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
- "memoryhotaddenabled": false,
- "guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
- "changeversion": "2016-05-16T18:43:14.977925Z",
- "uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
- "cpuhotremoveenabled": false,
- "vpmcenabled": false,
- "firmware": "bios",
-"""
-
-from __future__ import print_function
-
-import argparse
-import atexit
-import datetime
-import jinja2
-import os
-import six
-import ssl
-import sys
-import uuid
-
-from six.moves import configparser
-from time import time
-
-HAS_PYVMOMI = False
-try:
- from pyVim.connect import SmartConnect, Disconnect
- HAS_PYVMOMI = True
-except ImportError:
- pass
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-
-class VMWareInventory(object):
-
- __name__ = 'VMWareInventory'
-
- instances = []
- debug = False
- load_dumpfile = None
- write_dumpfile = None
- maxlevel = 1
- lowerkeys = True
- config = None
- cache_max_age = None
- cache_path_cache = None
- cache_path_index = None
- server = None
- port = None
- username = None
- password = None
- host_filters = []
- groupby_patterns = []
-
- bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
- if (sys.version_info > (3, 0)):
- safe_types = [int, bool, str, float, None]
- else:
- safe_types = [int, long, bool, str, float, None]
- iter_types = [dict, list]
- skip_keys = ['dynamicproperty', 'dynamictype', 'managedby', 'childtype']
-
- def _empty_inventory(self):
- return {"_meta": {"hostvars": {}}}
-
- def __init__(self, load=True):
- self.inventory = self._empty_inventory()
-
- if load:
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.read_settings()
-
- # Check the cache
- cache_valid = self.is_cache_valid()
-
- # Handle Cache
- if self.args.refresh_cache or not cache_valid:
- self.do_api_calls_update_cache()
- else:
- self.inventory = self.get_inventory_from_cache()
-
- def debugl(self, text):
- if self.args.debug:
- try:
- text = str(text)
- except UnicodeEncodeError:
- text = text.encode('ascii', 'ignore')
- print(text)
-
- def show(self):
- # Data to print
- data_to_print = None
- if self.args.host:
- data_to_print = self.get_host_info(self.args.host)
- elif self.args.list:
- # Display list of instances for inventory
- data_to_print = self.inventory
- return json.dumps(data_to_print, indent=2)
-
- def is_cache_valid(self):
- """Determine if the cache files have expired or it is still valid."""
-
- valid = False
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- valid = True
-
- return valid
-
- def do_api_calls_update_cache(self):
- """Get instances and cache the data."""
-
- instances = self.get_instances()
- self.instances = instances
- self.inventory = self.instances_to_inventory(instances)
- self.write_to_cache(self.inventory, self.cache_path_cache)
-
- def write_to_cache(self, data, cache_path):
- """Dump inventory to json file."""
- with open(self.cache_path_cache, 'wb') as f:
- f.write(json.dumps(data))
-
- def get_inventory_from_cache(self):
- """Read in jsonified inventory."""
-
- jdata = None
- with open(self.cache_path_cache, 'rb') as f:
- jdata = f.read()
- return json.loads(jdata)
-
- def read_settings(self):
- """Reads the settings from the vmware_inventory.ini file."""
-
- scriptbasename = __file__
- scriptbasename = os.path.basename(scriptbasename)
- scriptbasename = scriptbasename.replace('.py', '')
-
- defaults = {'vmware': {
- 'server': '',
- 'port': 443,
- 'username': '',
- 'password': '',
- 'ini_path': os.path.join(
- os.path.dirname(__file__), '%s.ini' % scriptbasename),
- 'cache_name': 'ansible-vmware',
- 'cache_path': '~/.ansible/tmp',
- 'cache_max_age': 3600,
- 'max_object_level': 1,
- 'alias_pattern': '{{ config.name + "_" + config.uuid }}',
- 'host_pattern': '{{ guest.ipaddress }}',
- 'host_filters': '{{ guest.gueststate == "running" }}',
- 'groupby_patterns': ('{{ guest.guestid }},{{ "templates" if '
- 'config.template else "guests"}}'),
- 'lower_var_keys': True,
- }}
-
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
-
- # where is the config?
- vmware_ini_path = os.environ.get(
- 'VMWARE_INI_PATH', defaults['vmware']['ini_path'])
- vmware_ini_path = os.path.expanduser(
- os.path.expandvars(vmware_ini_path))
- config.read(vmware_ini_path)
-
- # apply defaults
- for k, v in defaults['vmware'].items():
- if not config.has_option('vmware', k):
- config.set('vmware', k, str(v))
-
- # where is the cache?
- self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
- if self.cache_dir and not os.path.exists(self.cache_dir):
- os.makedirs(self.cache_dir)
-
- # set the cache filename and max age
- cache_name = config.get('vmware', 'cache_name')
- self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
-        self.cache_max_age = config.getint('vmware', 'cache_max_age')
-
- # mark the connection info
- self.server = os.environ.get(
- 'VMWARE_SERVER', config.get('vmware', 'server'))
- self.port = int(os.environ.get(
- 'VMWARE_PORT', config.get('vmware', 'port')))
- self.username = os.environ.get(
- 'VMWARE_USERNAME', config.get('vmware', 'username'))
- self.password = os.environ.get(
- 'VMWARE_PASSWORD', config.get('vmware', 'password'))
-
- # behavior control
- self.maxlevel = int(config.get('vmware', 'max_object_level'))
- self.lowerkeys = config.get('vmware', 'lower_var_keys')
-        if not isinstance(self.lowerkeys, bool):
-            self.lowerkeys = str(self.lowerkeys).lower() in ('yes', 'true', '1')
-
- self.host_filters = list(
- config.get('vmware', 'host_filters').split(','))
- self.groupby_patterns = list(
- config.get('vmware', 'groupby_patterns').split(','))
-
- # save the config
- self.config = config
-
- def parse_cli_args(self):
- """Command line argument processing."""
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on PyVmomi')
- parser.add_argument('--debug', action='store_true', default=False,
- help='show debug info')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument(
- '--host', action='store',
- help='Get all the variables about a specific instance')
- parser.add_argument(
- '--refresh-cache', action='store_true', default=False,
- help=("Force refresh of cache by making API requests to VSphere "
- "(default: False - use cache files)"))
- parser.add_argument('--max-instances', default=None, type=int,
- help='maximum number of instances to retrieve')
- self.args = parser.parse_args()
-
- def get_instances(self):
- """Get a list of vm instances with pyvmomi."""
-
- instances = []
- kwargs = {
- 'host': self.server,
- 'user': self.username,
- 'pwd': self.password,
- 'port': int(self.port),
- }
-
- if hasattr(ssl, 'SSLContext'):
- # older ssl libs do not have an SSLContext method:
- # context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
- # AttributeError: 'module' object has no attribute 'SSLContext'
-            # older pyvmomi versions also do not have an sslContext kwarg:
- # https://github.com/vmware/pyvmomi/commit/92c1de5056be7c5390ac2a28eb08ad939a4b7cdd
- context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
- context.verify_mode = ssl.CERT_NONE
- kwargs['sslContext'] = context
-
- instances = self._get_instances(kwargs)
- self.debugl("### INSTANCES RETRIEVED")
- return instances
-
- def _get_instances(self, inkwargs):
- """Make API calls."""
-
- instances = []
- si = SmartConnect(**inkwargs)
-
- if not si:
- print("Could not connect to the specified host using specified "
- "username and password")
- return -1
- atexit.register(Disconnect, si)
- content = si.RetrieveContent()
- for child in content.rootFolder.childEntity:
- instances += self._get_instances_from_children(child)
-            if self.args.max_instances:
-                # honor the '--max-instances' limit exactly
-                if len(instances) > self.args.max_instances:
-                    instances = instances[0:self.args.max_instances]
- instance_tuples = []
- for instance in sorted(instances):
- ifacts = self.facts_from_vobj(instance)
- instance_tuples.append((instance, ifacts))
- return instance_tuples
-
- def _get_instances_from_children(self, child):
- instances = []
-
- if hasattr(child, 'childEntity'):
- self.debugl("CHILDREN: %s" % child.childEntity)
- instances += self._get_instances_from_children(child.childEntity)
- elif hasattr(child, 'vmFolder'):
- self.debugl("FOLDER: %s" % child)
- instances += self._get_instances_from_children(child.vmFolder)
- elif hasattr(child, 'index'):
- self.debugl("LIST: %s" % child)
- for x in sorted(child):
- self.debugl("LIST_ITEM: %s" % x)
- instances += self._get_instances_from_children(x)
- elif hasattr(child, 'guest'):
- self.debugl("GUEST: %s" % child)
- instances.append(child)
- elif hasattr(child, 'vm'):
- # resource pools
- self.debugl("RESOURCEPOOL: %s" % child.vm)
- if child.vm:
- instances += self._get_instances_from_children(child.vm)
- else:
- self.debugl("ELSE ...")
- try:
- self.debugl(child.__dict__)
- except Exception:
- pass
- self.debugl(child)
- return instances
-
- def instances_to_inventory(self, instances):
- """Convert a list of vm objects into a json compliant inventory."""
-
- inventory = self._empty_inventory()
- inventory['all'] = {}
- inventory['all']['hosts'] = []
-
- for idx, instance in enumerate(instances):
-
-            # make a unique id for this object to avoid VMware's
-            # numerous UUIDs, which aren't all unique.
- thisid = str(uuid.uuid4())
- idata = instance[1]
-
- # Put it in the inventory
- inventory['all']['hosts'].append(thisid)
- inventory['_meta']['hostvars'][thisid] = idata.copy()
- inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid
-
- # Make a map of the uuid to the name the user wants
- name_mapping = self.create_template_mapping(
- inventory, self.config.get('vmware', 'alias_pattern'))
-
- # Make a map of the uuid to the ssh hostname the user wants
- host_mapping = self.create_template_mapping(
- inventory, self.config.get('vmware', 'host_pattern'))
-
- # Reset the inventory keys
- for k, v in name_mapping.items():
-
- # set ansible_host (2.x)
- inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
-
- # 1.9.x backwards compliance
- inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = (
- host_mapping[k])
-
- if k == v:
- continue
-
- # add new key
- inventory['all']['hosts'].append(v)
- inventory['_meta']['hostvars'][v] = (
- inventory['_meta']['hostvars'][k])
-
- # cleanup old key
- inventory['all']['hosts'].remove(k)
- inventory['_meta']['hostvars'].pop(k, None)
-
- self.debugl('PREFILTER_HOSTS:')
- for i in inventory['all']['hosts']:
- self.debugl(i)
-
- # Create special host filter removing all the hosts which
- # are not related to the configured cluster.
- if six.PY3:
- ocp_config = configparser.ConfigParser()
- else:
- ocp_config = configparser.SafeConfigParser()
- default_ocp_config = os.path.join(
- os.path.dirname(__file__), '../../../ocp-on-vmware.ini')
- ocp_ini_path = os.environ.get('VMWARE_INI_PATH', default_ocp_config)
- ocp_ini_path = os.path.expanduser(os.path.expandvars(ocp_ini_path))
- ocp_config.read(ocp_ini_path)
- cluster_id_filter = (
- "{{ config.annotation is not none and "
- "'%s' in config.annotation }}") % ocp_config.get(
- 'vmware', 'cluster_id')
- self.host_filters.append(cluster_id_filter)
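-        # For illustration, with a hypothetical cluster_id 'abc1234' the
-        # appended filter renders as:
-        #   {{ config.annotation is not none and 'abc1234' in config.annotation }}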
-
- # Apply host filters
- for hf in self.host_filters:
- if not hf:
- continue
- self.debugl('FILTER: %s' % hf)
- filter_map = self.create_template_mapping(
- inventory, hf, dtype='boolean')
- for k, v in filter_map.items():
- if not v:
- # delete this host
- inventory['all']['hosts'].remove(k)
- inventory['_meta']['hostvars'].pop(k, None)
-
- self.debugl('POSTFILTER_HOSTS:')
- for i in inventory['all']['hosts']:
- self.debugl(i)
-
- # Create groups
- for gbp in self.groupby_patterns:
- groupby_map = self.create_template_mapping(inventory, gbp)
- for k, v in groupby_map.items():
- if v not in inventory:
- inventory[v] = {}
- inventory[v]['hosts'] = []
- if k not in inventory[v]['hosts']:
- inventory[v]['hosts'].append(k)
-
- return inventory
-
- def create_template_mapping(self, inventory, pattern, dtype='string'):
- """Return a hash of uuid to templated string from pattern."""
-
- mapping = {}
- for k, v in inventory['_meta']['hostvars'].items():
- t = jinja2.Template(pattern)
- newkey = None
- try:
- newkey = t.render(v)
- newkey = newkey.strip()
- except Exception as e:
- self.debugl(e)
- if not newkey:
- continue
- elif dtype == 'integer':
- newkey = int(newkey)
- elif dtype == 'boolean':
- if newkey.lower() == 'false':
- newkey = False
- elif newkey.lower() == 'true':
- newkey = True
- elif dtype == 'string':
- pass
- mapping[k] = newkey
- return mapping
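-    # A minimal sketch of the rendering done in create_template_mapping,
-    # assuming a hypothetical hostvars entry (not part of this script):
-    #   >>> jinja2.Template('{{ guest.guestid }}').render(
-    #   ...     {'guest': {'guestid': 'rhel7_64Guest'}})
-    #   'rhel7_64Guest'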
-
- def facts_from_vobj(self, vobj, level=0):
- """Traverse a VM object and return a json compliant data structure."""
-
- # pyvmomi objects are not yet serializable, but may be one day ...
- # https://github.com/vmware/pyvmomi/issues/21
-
- rdata = {}
-
- # Do not serialize self
- if hasattr(vobj, '__name__'):
- if vobj.__name__ == 'VMWareInventory':
- return rdata
-
- # Exit early if maxlevel is reached
- if level > self.maxlevel:
- return rdata
-
- # Objects usually have a dict property
-        if hasattr(vobj, '__dict__') and level != 0:
-
- keys = sorted(vobj.__dict__.keys())
- for k in keys:
- v = vobj.__dict__[k]
- # Skip private methods
- if k.startswith('_'):
- continue
-
- if k.lower() in self.skip_keys:
- continue
-
- if self.lowerkeys:
- k = k.lower()
-
- rdata[k] = self._process_object_types(v, level=level)
- else:
- methods = dir(vobj)
- methods = [str(x) for x in methods if not x.startswith('_')]
- methods = [x for x in methods if x not in self.bad_types]
- methods = sorted(methods)
-
- for method in methods:
-
- if method in rdata:
- continue
-
- # Attempt to get the method, skip on fail
- try:
- methodToCall = getattr(vobj, method)
- except Exception:
- continue
-
- # Skip callable methods
- if callable(methodToCall):
- continue
-
- if self.lowerkeys:
- method = method.lower()
-
- rdata[method] = self._process_object_types(
- methodToCall,
- level=((level - 1)
- if method in ('guest', 'net') else level))
-
- return rdata
-
- def _process_object_types(self, vobj, level=0):
- rdata = {}
- self.debugl("PROCESSING: %s" % vobj)
-
- if type(vobj) in self.safe_types:
- try:
- rdata = vobj
- except Exception as e:
- self.debugl(e)
-
- elif hasattr(vobj, 'append'):
- rdata = []
- for vi in sorted(vobj):
- if type(vi) in self.safe_types:
- rdata.append(vi)
- else:
- if (level + 1 <= self.maxlevel):
- vid = self.facts_from_vobj(vi, level=(level + 1))
- if vid:
- rdata.append(vid)
-
- elif hasattr(vobj, '__dict__'):
- if (level + 1 <= self.maxlevel):
- md = None
- md = self.facts_from_vobj(vobj, level=(level + 1))
- if md:
- rdata = md
- elif not vobj or type(vobj) in self.safe_types:
- rdata = vobj
-        elif isinstance(vobj, datetime.datetime):
- rdata = str(vobj)
- else:
- self.debugl("unknown datatype: %s" % type(vobj))
-
- if not rdata:
- rdata = None
- return rdata
-
- def get_host_info(self, host):
- """Return hostvars for a single host."""
- return self.inventory['_meta']['hostvars'][host]
-
-
-if __name__ == "__main__":
- # Run the script
- print(VMWareInventory().show())
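-
-# Usage sketch (these flags are defined in parse_cli_args above):
-#   ./vmware_inventory.py --list            # full inventory as JSON
-#   ./vmware_inventory.py --host <name>     # hostvars for a single guest
-#   ./vmware_inventory.py --refresh-cache   # bypass the on-disk cache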
diff --git a/deployment/ocp-on-vmware.ini b/deployment/ocp-on-vmware.ini
deleted file mode 100644
index a72a6543..00000000
--- a/deployment/ocp-on-vmware.ini
+++ /dev/null
@@ -1,156 +0,0 @@
-[vmware]
-# unique cluster_id set during script run
-cluster_id=
-
-# console port and install type for OpenShift
-console_port=8443
-
-# choices are openshift-enterprise or origin
-deployment_type=openshift-enterprise
-
-# OpenShift Version.
-# Supported versions are 'v3_6', 'v3_7', 'v3_9', 'v3_10' and 'v3_11'.
-openshift_vers=v3_11
-
-# vCenter host address/username and password
-vcenter_host=
-vcenter_username=administrator@vsphere.local
-vcenter_password=
-
-# name of RHEL template to use for OpenShift install
-vcenter_template_name=
-
-# folder/cluster/resource pool in vCenter to organize VMs
-vcenter_datacenter=CNS
-vcenter_cluster=
-vcenter_resource_pool=ansible_pool
-vcenter_datastore=
-vcenter_folder=ansible_vms
-
-# The 'disable_yum_update_and_reboot' boolean option enables or disables the
-# 'yum update' operation and the subsequent node reboot.
-# It accepts any Ansible boolean-like value.
-disable_yum_update_and_reboot=no
-
-### Docker config ###
-# 'docker_registry_url' specifies the main docker registry to pull images
-# from. Optional. If not set, the default one will be used. Example:
-# docker_registry_url=myregistry.foo:5000/openshift3/ose-${component}:${version}
-docker_registry_url=
-
-# The 'docker_additional_registries' option defines a list of
-# additional registries. Optional. Example:
-# docker_additional_registries=bar-registry.bar:8888,foo-registry.foo:5000
-docker_additional_registries=
-
-# The 'docker_insecure_registries' option defines a list of
-# insecure registries. Optional. Example:
-# docker_insecure_registries=bar-registry.bar:8888,foo-registry.foo:5000
-docker_insecure_registries=
-
-# Specify 'docker_image_tag' to install specific versions of docker images.
-# Optional. Example: docker_image_tag=v3.9.3
-docker_image_tag=
-
-### Repositories config ###
-# Specify the 'ose_puddle_repo' option if a downstream repository with
-# OpenShift packages is needed, for example development versions.
-# It should be a link to the packages, not a "repo" file pointing to them.
-# If defined, the GA repo will be disabled.
-# Example:
-# ose_puddle_repo=http://mysite.com/my_openshift_packages/3.9/latest/x86_64/os/
-ose_puddle_repo=
-
-# Define the 'gluster_puddle_repo' option the same way as 'ose_puddle_repo',
-# just for Gluster 3 packages. Optional. If defined, the GA repo will be
-# disabled.
-gluster_puddle_repo=
-
-### CNS docker options ###
-# Set the following options to change docker image names and versions
-# for CNS pods. Leave empty to make Ansible use the default values.
-cns_glusterfs_image=rhgs3/rhgs-server-rhel7
-cns_glusterfs_version=latest
-
-cns_glusterfs_block_image=rhgs3/rhgs-gluster-block-prov-rhel7
-cns_glusterfs_block_version=latest
-
-cns_glusterfs_heketi_image=rhgs3/rhgs-volmanager-rhel7
-cns_glusterfs_heketi_version=latest
-
-##########################
-
-# DNS zone where everything will be hosted and app wildcard prefix
-dns_zone=openshift-on-vmware
-app_dns_prefix=apps
-
-# VMWare network in the VMWare cluster to attach newly provisioned VMs to
-vm_network="VM Network"
-
-# Red Hat subscription manager data
-rhel_subscription_server=subscription.rhsm.stage.redhat.com:443
-rhel_subscription_user=
-rhel_subscription_pass=
-rhel_subscription_pool=Employee SKU
-
-# number of nodes of each type
-compute_nodes=2
-storage_nodes=3
-
-# node hostname prefix
-ocp_hostname_prefix=
-
-# Deploy OpenShift Web Console
-web_console_install=false
-
-# OpenShift SDN (default value redhat/openshift-ovs-subnet)
-openshift_sdn=redhat/openshift-ovs-subnet
-
-########################
-# CRI-O runtime support
-openshift_use_crio=false
-########################
-
-# persistent container storage: none, crs, cns
-container_storage=none
-
-# Disk sizes in GB of the persistent container storage per CNS/CRS node.
-# Examples:
-# 'container_storage_disks=100' means we create 1 disk (sdd) of 100 GB.
-# 'container_storage_disks=100,600,200' means we create 3 disks, where the
-# first disk (sdd) is 100 GB in size, the second (sde) is 600 GB and the
-# third (sdf) is 200 GB.
-container_storage_disks=100,600
-
-# The following is used for deployment of the block hosting volume for CNS
-# when block storage is enabled.
-container_storage_block_hosting_volume_size=99
-
-# Use the 'additional_disks_to_storage_nodes' option to specify storage
-# devices that should be attached to the storage nodes but left unused.
-additional_disks_to_storage_nodes=100
-
-# persistent container storage disk type. Can be one of the following:
-# 'thin', 'thick', 'zeroedThick', 'eagerZeroedThick', ... or any other
-# type supported by VMWare.
-container_storage_disk_type=thin
-
-# Seconds to wait for container storage pods to become ready.
-container_storage_glusterfs_timeout=600
-
-# String to use as the secret key for performing heketi commands as admin.
-# If not set, it will be autogenerated.
-heketi_admin_key=admin
-
-# String to use as the secret key for performing heketi commands as user.
-# If not set, it will be autogenerated.
-heketi_user_key=user
-
-# The 'cns_automation_config_file_path' config option refers to the config
-# file of the 'glusterfs-containers-tests' repo, which stores automated tests.
-# If set, all the deployment-specific data will be written there.
-# If not set, no such actions will be performed.
-# If it is set and the playbooks are run without 'tox', make sure the 'yedit'
-# Ansible module is enabled. It can be found at either of the following links:
-# - https://github.com/kwoodson/yedit (Author)
-# - https://github.com/vponomaryov/yedit (forked)
-cns_automation_config_file_path=./cns-automation-config.yaml
diff --git a/deployment/ocp-on-vmware.py b/deployment/ocp-on-vmware.py
deleted file mode 100755
index a099daae..00000000
--- a/deployment/ocp-on-vmware.py
+++ /dev/null
@@ -1,477 +0,0 @@
-#!/usr/bin/env python
-# set ts=4 sw=4 et
-import argparse
-import click
-import fileinput
-import json
-import os
-import random
-import requests
-import six
-from six.moves import configparser
-import sys
-import yaml
-
-
-class OCPOnVMWare(object):
-
- __name__ = 'OCPOnVMWare'
- console_port = 8443
- cluster_id = None
- deployment_type = None
- openshift_vers = None
- vcenter_host = None
- vcenter_username = None
- vcenter_password = None
- vcenter_template_name = None
- vcenter_folder = None
- vcenter_cluster = None
- vcenter_datacenter = None
- vcenter_datastore = None
- vcenter_resource_pool = None
- dns_zone = None
- app_dns_prefix = None
- vm_network = None
- rhel_subscription_user = None
- rhel_subscription_pass = None
- rhel_subscription_server = None
- rhel_subscription_pool = None
- no_confirm = False
- tag = None
- verbose = 0
- create_inventory = None
- compute_nodes = None
- ocp_hostname_prefix = None
- create_ocp_vars = None
- openshift_sdn = None
- container_storage = None
- openshift_disable_check = None
- wildcard_zone = None
- inventory_file = 'infrastructure.json'
- vmware_ini_path = None
- clean = None
-    cns_automation_config_file_path = None
- docker_registry_url = None
- docker_additional_registries = None
- docker_insecure_registries = None
- docker_image_tag = None
- ose_puddle_repo = None
- gluster_puddle_repo = None
- web_console_install = None
- disable_yum_update_and_reboot = None
- openshift_use_crio = None
-
- def __init__(self):
- self._parse_cli_args()
- self._read_ini_settings()
- self._create_inventory_file()
- self._create_ocp_vars()
- self._launch_refarch_env()
-
- def _parse_cli_args(self):
- """Command line argument processing"""
-        tag_help = '''Skip to various parts of the install. Valid tags include:
-        - setup: create the vCenter folder and resource pool
-        - prod: create and set up the OCP cluster
-        - ocp-install: install OCP on the prod VMs
-        - ocp-config: configure OCP on the prod VMs
-        - clean: unsubscribe and remove all VMs'''
- parser = argparse.ArgumentParser(
- description='Deploy VMs to vSphere and install/configure OCP',
- formatter_class=argparse.RawTextHelpFormatter)
- parser.add_argument(
- '--clean', action='store_true',
- help='Delete all nodes and unregister from RHN')
- parser.add_argument(
- '--create_inventory', action='store_true',
- help=('Deprecated and not used option. '
- 'Everything that is needed gets autocreated.'))
- parser.add_argument(
- '--create_ocp_vars', action='store_true',
- help='Deprecated and not used option.')
- parser.add_argument(
- '--no_confirm', action='store_true',
- help='Skip confirmation prompt')
- parser.add_argument('--tag', default=None, help=tag_help)
- parser.add_argument(
- '--verbose', default=None, action='store_true',
- help='Verbosely display commands')
- self.args = parser.parse_args()
- self.verbose = self.args.verbose
- self.tag = self.args.tag
- self.no_confirm = self.args.no_confirm
- self.clean = self.args.clean
-
- def _is_rpm_and_image_tag_compatible(self):
- if not (self.docker_image_tag and self.ose_puddle_repo):
- return True
- url = self.ose_puddle_repo
- if url[-1] == '/':
- url += 'Packages/'
- else:
- url += '/Packages/'
- resp = requests.get(url)
- if resp.ok:
- v = self.docker_image_tag.split('v')[-1].strip().split('-')[0]
- return (('atomic-openshift-%s' % v) in resp.text)
- raise Exception(
- "Failed to pull list of packages from '%s' url." % url)
-
- def _read_ini_settings(self):
- """Read ini file settings."""
-
- scriptbasename = "ocp-on-vmware"
- defaults = {'vmware': {
- 'ini_path': os.path.join(
- os.path.dirname(__file__), '%s.ini' % scriptbasename),
- 'console_port': '8443',
- 'container_storage': 'none',
- 'deployment_type': 'openshift-enterprise',
- 'openshift_vers': 'v3_11',
- 'vcenter_username': 'administrator@vsphere.local',
- 'vcenter_template_name': 'not-defined',
- 'vcenter_folder': 'ocp',
- 'vcenter_resource_pool': '/Resources/OCP3',
- 'app_dns_prefix': 'apps',
- 'vm_network': 'VM Network',
- 'cns_automation_config_file_path': '',
- 'docker_registry_url': '',
- 'docker_additional_registries': '',
- 'docker_insecure_registries': '',
- 'docker_image_tag': '',
- 'web_console_install': '',
- 'ose_puddle_repo': '',
- 'gluster_puddle_repo': '',
- 'rhel_subscription_pool': 'Employee SKU',
- 'openshift_sdn': 'redhat/openshift-ovs-subnet',
- 'compute_nodes': '2',
- 'ocp_hostname_prefix': 'openshift-on-vmware',
- 'tag': self.tag,
- 'openshift_disable_check': (
- 'docker_storage,docker_image_availability,disk_availability'),
- 'disable_yum_update_and_reboot': 'no',
- 'openshift_use_crio': 'false',
- }}
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
-
- # where is the config?
- self.vmware_ini_path = os.environ.get(
- 'VMWARE_INI_PATH', defaults['vmware']['ini_path'])
- self.vmware_ini_path = os.path.expanduser(
- os.path.expandvars(self.vmware_ini_path))
- config.read(self.vmware_ini_path)
-
- # apply defaults
- for k, v in defaults['vmware'].items():
- if not config.has_option('vmware', k):
- config.set('vmware', k, str(v))
-
- self.console_port = config.get('vmware', 'console_port')
- self.cluster_id = config.get('vmware', 'cluster_id')
- self.container_storage = config.get('vmware', 'container_storage')
- self.deployment_type = config.get('vmware', 'deployment_type')
- if os.environ.get('VIRTUAL_ENV'):
- self.openshift_vers = (
- 'v3_%s' % os.environ['VIRTUAL_ENV'].split('_')[-1].split(
- '.')[-1])
- else:
- self.openshift_vers = config.get('vmware', 'openshift_vers')
- self.vcenter_host = config.get('vmware', 'vcenter_host')
- self.vcenter_username = config.get('vmware', 'vcenter_username')
- self.vcenter_password = config.get('vmware', 'vcenter_password')
- self.vcenter_template_name = config.get(
- 'vmware', 'vcenter_template_name')
- self.vcenter_folder = config.get('vmware', 'vcenter_folder')
- self.vcenter_datastore = config.get('vmware', 'vcenter_datastore')
- self.vcenter_datacenter = config.get('vmware', 'vcenter_datacenter')
- self.vcenter_cluster = config.get('vmware', 'vcenter_cluster')
- self.vcenter_resource_pool = config.get(
- 'vmware', 'vcenter_resource_pool')
- self.dns_zone = config.get('vmware', 'dns_zone')
- self.app_dns_prefix = config.get('vmware', 'app_dns_prefix')
- self.vm_network = config.get('vmware', 'vm_network')
- self.ocp_hostname_prefix = config.get(
- 'vmware', 'ocp_hostname_prefix') or 'ansible-on-vmware'
- self.lb_host = '%s-master-0' % self.ocp_hostname_prefix
- self.cns_automation_config_file_path = config.get(
- 'vmware', 'cns_automation_config_file_path')
- self.docker_registry_url = (
- config.get('vmware', 'docker_registry_url') or '').strip()
- self.docker_additional_registries = config.get(
- 'vmware', 'docker_additional_registries')
- self.docker_insecure_registries = config.get(
- 'vmware', 'docker_insecure_registries')
- self.docker_image_tag = (
- config.get('vmware', 'docker_image_tag') or '').strip()
- self.web_console_install = (
- config.get('vmware', 'web_console_install') or '').strip()
- self.ose_puddle_repo = config.get('vmware', 'ose_puddle_repo')
- self.gluster_puddle_repo = config.get('vmware', 'gluster_puddle_repo')
- self.rhel_subscription_user = config.get(
- 'vmware', 'rhel_subscription_user')
- self.rhel_subscription_pass = config.get(
- 'vmware', 'rhel_subscription_pass')
- self.rhel_subscription_server = config.get(
- 'vmware', 'rhel_subscription_server')
- self.rhel_subscription_pool = config.get(
- 'vmware', 'rhel_subscription_pool')
- self.openshift_sdn = config.get('vmware', 'openshift_sdn')
- self.compute_nodes = config.get('vmware', 'compute_nodes')
- self.storage_nodes = config.get('vmware', 'storage_nodes')
- self.openshift_disable_check = config.get(
- 'vmware', 'openshift_disable_check').strip() or (
- 'docker_storage,docker_image_availability,disk_availability')
- self.disable_yum_update_and_reboot = config.get(
- 'vmware', 'disable_yum_update_and_reboot').strip() or 'no'
- self.openshift_use_crio = (
- config.get('vmware', 'openshift_use_crio') or '').strip()
- err_count = 0
-
- required_vars = {
- 'vcenter_datacenter': self.vcenter_datacenter,
- 'vcenter_host': self.vcenter_host,
- 'vcenter_password': self.vcenter_password,
- 'vcenter_template_name': self.vcenter_template_name,
- 'dns_zone': self.dns_zone,
- }
-
- for k, v in required_vars.items():
- if v == '':
- err_count += 1
- print "Missing %s " % k
-        if (self.cns_automation_config_file_path
-                and not os.path.exists(
-                    os.path.abspath(self.cns_automation_config_file_path))):
-            err_count += 1
-            print("Wrong value for 'cns_automation_config_file_path' "
-                  "config option. It is expected to be either a relative "
-                  "or an absolute file path.")
-        elif self.cns_automation_config_file_path:
-            # only normalize a non-empty path; os.path.abspath('') would
-            # silently turn an unset option into the current directory
-            self.cns_automation_config_file_path = os.path.abspath(
-                self.cns_automation_config_file_path)
- if self.docker_image_tag and self.docker_registry_url:
- vers_from_reg = self.docker_registry_url.split(':')[-1].strip()
-            if vers_from_reg != self.docker_image_tag:
- err_count += 1
- print ("If 'docker_image_tag' and 'docker_registry_url' are "
- "specified, then their image tags should match. "
- "docker_image_tag='%s', docker_registry_url='%s'" % (
- self.docker_image_tag, self.docker_registry_url))
- if not self._is_rpm_and_image_tag_compatible():
- err_count += 1
- print ("OCP RPM versions and docker image tag do not match. "
- "Need either to change 'ose_puddle_repo' or "
- "'docker_image_tag' config options.")
- allowed_disable_checks = (
- 'disk_availability',
- 'docker_image_availability',
- 'docker_storage',
- 'memory_availability',
- 'package_availability',
- 'package_version',
- )
- self.openshift_disable_check_data = [
- el.strip()
- for el in self.openshift_disable_check.strip().split(',')
- if el.strip()
- ]
- if not all([(s in allowed_disable_checks)
- for s in self.openshift_disable_check_data]):
- err_count += 1
- print ("'openshift_disable_check' is allowed to have only "
- "following values separated with comma: %s.\n "
- "Got following value: %s" % (','.join(
- allowed_disable_checks), self.openshift_disable_check))
-
-        if err_count > 0:
-            print("Please fill out the missing variables in %s" %
-                  self.vmware_ini_path)
-            exit(1)
- self.wildcard_zone = "%s.%s" % (self.app_dns_prefix, self.dns_zone)
-
- if not self.cluster_id:
- # Create a unique cluster_id first
- self.cluster_id = ''.join(
- random.choice('0123456789abcdefghijklmnopqrstuvwxyz')
- for i in range(7))
- config.set('vmware', 'cluster_id', self.cluster_id)
- for line in fileinput.input(self.vmware_ini_path, inplace=True):
- if line.startswith('cluster_id'):
- print "cluster_id=" + str(self.cluster_id)
- else:
- print line,
-
-        print('Configured inventory values:')
-        for each_section in config.sections():
-            for (key, val) in config.items(each_section):
-                if 'pass' in key:
-                    print('\t %s: ******' % key)
-                else:
-                    print('\t %s: %s' % (key, val))
-        print('\n')
-
- def _create_inventory_file(self):
- click.echo('Configured inventory values:')
- click.echo('\tcompute_nodes: %s' % self.compute_nodes)
- click.echo('\tdns_zone: %s' % self.dns_zone)
- click.echo('\tapp_dns_prefix: %s' % self.app_dns_prefix)
- click.echo('\tocp_hostname_prefix: %s' % self.ocp_hostname_prefix)
- click.echo('\tUsing values from: %s' % self.vmware_ini_path)
- click.echo("")
- if not self.no_confirm:
- click.confirm('Continue using these values?', abort=True)
-
- master_name = "%s-master-0" % self.ocp_hostname_prefix
- d = {'host_inventory': {master_name: {
- 'guestname': master_name,
- 'guesttype': 'master',
- 'tag': str(self.cluster_id) + '-master',
- }}}
- for i in range(0, int(self.compute_nodes)):
- compute_name = "%s-compute-%d" % (self.ocp_hostname_prefix, i)
- d['host_inventory'][compute_name] = {
- 'guestname': compute_name,
- 'guesttype': 'compute',
- 'tag': '%s-compute' % self.cluster_id,
- }
-
- with open(self.inventory_file, 'w') as outfile:
- json.dump(d, outfile, indent=4, sort_keys=True)
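-
-        # For illustration, with hypothetical values
-        # ocp_hostname_prefix='ocp3' and compute_nodes=2, the file written
-        # above contains:
-        #   {"host_inventory": {"ocp3-master-0": {...},
-        #                       "ocp3-compute-0": {...},
-        #                       "ocp3-compute-1": {...}}}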
-
- if self.args.create_inventory:
- exit(0)
-
- def _create_ocp_vars(self):
- if self.args.create_ocp_vars:
- click.echo(
- "No-op run. '--create_ocp_vars' option is not used anymore. "
- "Ending execution.")
- exit(0)
-
- def _launch_refarch_env(self):
- with open(self.inventory_file, 'r') as f:
-            print(yaml.safe_dump(json.load(f), default_flow_style=False))
-
- if not self.args.no_confirm:
- if not click.confirm('Continue adding nodes with these values?'):
- sys.exit(0)
-
-        # Update the dynamic inventory config file with the vCenter
-        # connection values provided by the user.
-        for line in fileinput.input(
-                "inventory/vsphere/vms/vmware_inventory.ini", inplace=True):
-            if line.startswith("server="):
-                print("server=" + self.vcenter_host)
-            elif line.startswith("password="):
-                print("password=" + self.vcenter_password)
-            elif line.startswith("username="):
-                print("username=" + self.vcenter_username)
-            else:
-                sys.stdout.write(line)
-
-        if self.clean is True:
-            tags = 'clean'
-        elif self.tag:
-            tags = self.tag
-        else:
-            # join only the default list; a user-provided '--tag' value is
-            # already a comma-separated string and must not be re-joined
-            tags = ",".join([
-                'setup',
-                'prod',
-                'ocp-install',
-                'ocp-configure',
-            ])
-
- # remove any cached facts to prevent stale data during a re-run
- command = 'rm -rf .ansible/cached_facts'
- os.system(command)
-
- playbook_vars_dict = {
- 'vcenter_host': self.vcenter_host,
- 'vcenter_username': self.vcenter_username,
- 'vcenter_password': self.vcenter_password,
- 'vcenter_template_name': self.vcenter_template_name,
- 'vcenter_folder': self.vcenter_folder,
- 'vcenter_cluster': self.vcenter_cluster,
- 'vcenter_datacenter': self.vcenter_datacenter,
- 'vcenter_datastore': self.vcenter_datastore,
- 'vcenter_resource_pool': self.vcenter_resource_pool,
- 'dns_zone': self.dns_zone,
- 'app_dns_prefix': self.app_dns_prefix,
- 'vm_network': self.vm_network,
- 'lb_host': self.lb_host,
- 'cns_automation_config_file_path': (
- self.cns_automation_config_file_path),
- 'ose_puddle_repo': self.ose_puddle_repo,
- 'gluster_puddle_repo': self.gluster_puddle_repo,
- 'wildcard_zone': self.wildcard_zone,
- 'console_port': self.console_port,
- 'cluster_id': self.cluster_id,
- 'deployment_type': self.deployment_type,
- 'openshift_vers': self.openshift_vers,
- 'rhsm_user': self.rhel_subscription_user,
- 'rhsm_password': self.rhel_subscription_pass,
- 'rhsm_satellite': self.rhel_subscription_server,
- 'rhsm_pool': self.rhel_subscription_pool,
- 'openshift_sdn': self.openshift_sdn,
- 'openshift_use_openshift_sdn': True,
- 'container_storage': self.container_storage,
- 'ocp_hostname_prefix': self.ocp_hostname_prefix,
- 'disable_yum_update_and_reboot': self.disable_yum_update_and_reboot
- }
- if self.openshift_disable_check_data:
- playbook_vars_dict["openshift_disable_check"] = (
- ','.join(self.openshift_disable_check_data))
- if self.docker_registry_url:
- playbook_vars_dict['oreg_url'] = self.docker_registry_url
- if self.docker_additional_registries:
- playbook_vars_dict['openshift_docker_additional_registries'] = (
- self.docker_additional_registries)
- playbook_vars_dict['openshift_docker_ent_reg'] = ''
- if self.docker_insecure_registries:
- playbook_vars_dict['openshift_docker_insecure_registries'] = (
- self.docker_insecure_registries)
- if self.docker_image_tag:
- playbook_vars_dict['openshift_image_tag'] = self.docker_image_tag
- if self.web_console_install:
- playbook_vars_dict['openshift_web_console_install'] = (
- self.web_console_install)
- if self.openshift_vers == 'v3_11':
- if self.openshift_use_crio:
- playbook_vars_dict['openshift_use_crio'] = (
- self.openshift_use_crio)
- playbook_vars_dict['openshift_use_crio_only'] = (
- self.openshift_use_crio)
- playbook_vars_dict['openshift_crio_enable_docker_gc'] = (
- self.openshift_use_crio)
- else:
- playbook_vars_dict['openshift_use_crio'] = 'false'
- if self.openshift_vers in ('v3_6', 'v3_7'):
- playbook_vars_dict['docker_version'] = '1.12.6'
-
- playbook_vars_str = ' '.join('%s=%s' % (k, v)
- for (k, v) in playbook_vars_dict.items())
-
- command = (
- "ansible-playbook"
- " --extra-vars '@./infrastructure.json'"
- " --tags %s"
- " -e '%s' playbooks/ocp-end-to-end.yaml"
- ) % (tags, playbook_vars_str)
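-
-        # For illustration (hypothetical values), the assembled command
-        # looks like:
-        #   ansible-playbook --extra-vars '@./infrastructure.json' \
-        #     --tags setup,prod,ocp-install,ocp-configure \
-        #     -e 'console_port=8443 cluster_id=abc1234 ...' \
-        #     playbooks/ocp-end-to-end.yaml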
-
-        # args.verbose defaults to None, so avoid a 'None > 0' comparison
-        if self.verbose:
- command += " -vvvvvv"
-
- click.echo('We are running: %s' % command)
- status = os.system(command)
- if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
- sys.exit(os.WEXITSTATUS(status))
-
-
-if __name__ == '__main__':
- OCPOnVMWare()
diff --git a/deployment/playbooks/add-node-prerequisite.yaml b/deployment/playbooks/add-node-prerequisite.yaml
deleted file mode 100644
index f43b3545..00000000
--- a/deployment/playbooks/add-node-prerequisite.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: new_nodes
- gather_facts: yes
- become: yes
- vars_files:
- - vars/main.yaml
- roles:
- - package-repos
-
-- hosts: new_nodes
- gather_facts: no
- become: yes
- vars_files:
- - vars/main.yaml
- roles:
- - prerequisites
diff --git a/deployment/playbooks/add-node.yaml b/deployment/playbooks/add-node.yaml
deleted file mode 100644
index 51971644..00000000
--- a/deployment/playbooks/add-node.yaml
+++ /dev/null
@@ -1,142 +0,0 @@
----
-- hosts: localhost
- connection: local
- gather_facts: no
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - create-vm-add-prod-ose
- - setup-custom-domain-names-for-ansible-runner
-
-- hosts: new_nodes
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - instance-groups
- - package-repos
- - vmware-guest-setup
- - cloud-provider-setup
- - docker-storage-setup
- - openshift-volume-quota
-
-# The 'openshift_node_groups' var became required starting with OCP 3.10
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - set_fact:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- include: add-node-prerequisite.yaml
- when: openshift_vers in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Map domain names and IP addresses of old and new nodes to each other
- hosts: master, compute, cns, crs, !new_nodes
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
-
-- include: node-setup.yaml
-
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - name: Make sure dnsmasq is running, enabled and restarted
- service: name=dnsmasq state=restarted enabled=yes
-
-- hosts: localhost
- gather_facts: no
- become: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
diff --git a/deployment/playbooks/clean.yaml b/deployment/playbooks/clean.yaml
deleted file mode 100644
index 68da95ec..00000000
--- a/deployment/playbooks/clean.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- hosts: localhost
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
-
-- hosts: allnodes
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - rhsm-unregister
-
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Delete all added VMs
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ hostvars[item].inventory_hostname }}"
- datacenter: "{{ vcenter_datacenter }}"
- cluster: "{{ vcenter_cluster }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- folder: "/{{ vcenter_datacenter }}/vm/{{ vcenter_folder }}"
- state: absent
- force: true
- with_items: "{{ groups['allnodes'] }}"
-
- - name: Get current user home dir
- shell: 'eval echo "~$USER"'
- register: home_dir
-    - name: Set hosts file paths
- set_fact:
- home_hosts_file: "{{ home_dir.stdout_lines[0] + '/.ssh/config' }}"
- system_hosts_file: "/etc/hosts"
- - name: Check 'write' permissions for system hosts file
- stat:
- path: "{{ system_hosts_file }}"
- register: stat_system_hosts
-
- - name: Update system hosts file if writeable
- lineinfile:
- dest: "{{ system_hosts_file }}"
- state: absent
- regexp: "{{ hostvars[item].inventory_hostname }}"
- create: true
- with_items: "{{ groups['allnodes'] }}"
- when: "stat_system_hosts.stat.writeable"
-    - name: Update user's SSH config file
- lineinfile:
- dest: "{{ home_hosts_file }}"
- state: present
- line: "Host obsolete-{{ item }}"
- regexp: "Host {{ item }}"
- create: true
- mode: '644'
- with_items: "{{ groups['allnodes'] }}"
diff --git a/deployment/playbooks/cleanup-cns.yaml b/deployment/playbooks/cleanup-cns.yaml
deleted file mode 100644
index 5a2d8497..00000000
--- a/deployment/playbooks/cleanup-cns.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
-
-- hosts: cns
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - rhsm-unregister
-
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Delete cns VMs
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- folder: "/{{ vcenter_folder }}"
- name: "{{ item.value.guestname }}"
- state: absent
- force: true
- with_dict: "{{host_inventory}}"
- when: "'cns' in item.value.guestname"
diff --git a/deployment/playbooks/cleanup-crs.yaml b/deployment/playbooks/cleanup-crs.yaml
deleted file mode 100644
index 3d6ee533..00000000
--- a/deployment/playbooks/cleanup-crs.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
-
-- hosts: crs
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - rhsm-unregister
-
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Delete crs VMs
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- folder: "/{{ vcenter_folder }}"
- name: "{{ item.value.guestname }}"
- state: absent
- force: true
- with_dict: "{{host_inventory}}"
- when: "'crs' in item.value.guestname"
diff --git a/deployment/playbooks/cns-node-setup.yaml b/deployment/playbooks/cns-node-setup.yaml
deleted file mode 100644
index c5293619..00000000
--- a/deployment/playbooks/cns-node-setup.yaml
+++ /dev/null
@@ -1,131 +0,0 @@
----
-- hosts: cns
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - instance-groups
- - package-repos
- - vmware-guest-setup
- - cloud-provider-setup
- - docker-storage-setup
- - openshift-volume-quota
- - gluster-ports
-
-# The 'openshift_node_groups' var became required starting with OCP 3.10
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - set_fact:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- include: add-node-prerequisite.yaml
- when: openshift_vers in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Map domain names and IP addresses of old and new nodes to each other
- hosts: master, compute, crs
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
-
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - name: Make sure dnsmasq is running, enabled and restarted
- service: name=dnsmasq state=restarted enabled=yes
-
-- hosts: localhost
- gather_facts: no
- become: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
diff --git a/deployment/playbooks/cns-setup.yaml b/deployment/playbooks/cns-setup.yaml
deleted file mode 100644
index ce17cc08..00000000
--- a/deployment/playbooks/cns-setup.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
----
-- hosts: cns
- tasks:
- - name: Install required kernel modules on CNS nodes
- import_role:
- name: openshift_storage_glusterfs
- tasks_from: kernel_modules.yml
-
-- name: Restart dnsmasq to make our custom configs take effect
- hosts: allnodes
- tasks:
- - service:
- name: dnsmasq
- state: restarted
-
-- hosts: single_master
- tasks:
- - name: Perform actions on master node which are required to install CNS
- import_role:
- name: openshift_storage_glusterfs
- vars:
- openshift_storage_glusterfs_name: 'storage'
- openshift_storage_glusterfs_namespace: 'storage'
- openshift_storage_glusterfs_is_native: true
- openshift_storage_glusterfs_storageclass: true
- openshift_storage_glusterfs_block_storageclass: true
- openshift_storage_glusterfs_s3_deploy: false
- openshift_storage_glusterfs_heketi_admin_key: "{{
- (dp_tool_heketi_admin_key.strip() != '') |
- ternary(dp_tool_heketi_admin_key.strip(), omit) }}"
- openshift_storage_glusterfs_heketi_user_key: "{{
- (dp_tool_heketi_user_key.strip() != '') |
- ternary(dp_tool_heketi_user_key.strip(), omit) }}"
- openshift_storage_glusterfs_heketi_topology_load: true
-    - name: Allow expansion of PVCs using the 'glusterfs' storageclass
- oc_edit:
- kind: sc
- name: glusterfs-{{ glusterfs_name }}
- content:
- allowVolumeExpansion: true
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Get IP address of the node with router
- hosts: single_master
- tasks:
- - command: "oc get endpoints router -o=custom-columns=:.subsets[*].addresses[0].ip -n default"
- register: router_get
- - set_fact:
- router_ip: "{{ router_get.stdout_lines[1].strip() }}"
- delegate_to: "{{ item }}"
- delegate_facts: True
- with_items: "{{ groups['allnodes'] }}"
-
-- name: Update dnsmasq config with custom domain zone for apps
- hosts: allnodes
- tasks:
- - lineinfile:
- path: /etc/dnsmasq.conf
- line: "address=/.{{ app_dns_prefix }}.{{ dns_zone }}/{{ router_ip }}"
- - service:
- name: dnsmasq
- state: restarted
-
-- hosts: single_master
- tasks:
- - name: Get IPv4 address of the main master node
- command: "python -c \"import yaml ;
- config = yaml.load(open('/etc/origin/master/master-config.yaml', 'r'));
- print(config['kubernetesMasterConfig']['masterIP'])
- \""
- register: master_ipv4
- - set_fact:
- master_ipv4: "{{ master_ipv4.stdout_lines[0] }}"
- - name: Read Heketi dc name
- shell: "oc get dc -n storage | grep heketi | awk '{ print $1}'"
- register: heketi_dc_name_raw
- - name: Read Heketi svc name
- shell: "oc get svc -n storage | grep -e heketi | grep -v heketi-db | awk '{ print $1}'"
- register: heketi_svc_name_raw
- - name: Save Heketi DC and SVC names in separate vars
- set_fact:
- heketi_dc_name: "{{ heketi_dc_name_raw.stdout.strip() }}"
- heketi_svc_name: "{{ heketi_svc_name_raw.stdout.strip() }}"
- - command: "oc patch svc {{ heketi_svc_name }} --namespace storage
- -p '{\"spec\":{\"externalIPs\":[\"{{ master_ipv4 }}\"]}}'"
- run_once: true
-
-# The following updates the config file which is required
-# for automated tests from the 'cns-automation' repo
-
-- name: Update 'cns-automation' config file
- hosts: localhost
- tasks:
- - set_fact:
- master_ipv4: "{{ hostvars[groups['single_master'][0]].master_ipv4 }}"
- - yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: openshift.storage_project_name
- value: "storage"
- - key: openshift.heketi_config.heketi_dc_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_dc_name }}"
- - key: openshift.heketi_config.heketi_service_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_svc_name }}"
- - key: openshift.heketi_config.heketi_client_node
- value: "{{ master_ipv4 }}"
- - key: openshift.heketi_config.heketi_server_url
- value: "http://{{ master_ipv4 }}:8080"
- - key: openshift.heketi_config.heketi_cli_user
- value: 'admin'
- - key: openshift.heketi_config.heketi_cli_key
- value: "{{ dp_tool_heketi_admin_key }}"
- - key: openshift.dynamic_provisioning.storage_classes
- value:
- file_storage_class:
- provisioner: "kubernetes.io/glusterfs"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- secretnamespace: "storage"
- volumenameprefix: "autotests-file"
- block_storage_class:
- provisioner: "gluster.org/glusterblock"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- restsecretnamespace: "storage"
- volumenameprefix: "autotests-block"
- hacount: "3"
- chapauthenabled: "true"
- when: cns_automation_config_file_path | length > 0
- run_once: true
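-
-# For illustration (hypothetical master IP), the edits above would write
-# entries such as the following into the 'cns-automation' config file:
-#   openshift:
-#     heketi_config:
-#       heketi_client_node: "192.168.100.10"
-#       heketi_server_url: "http://192.168.100.10:8080"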
-
-- name: Install 'heketi-client' package on all the master nodes
- hosts: master
- gather_facts: no
- tasks:
- - name: Get Heketi POD name on master node
- command: "oc get pods -l glusterfs=heketi-storage-pod --no-headers {{ ''
- }}-o=custom-columns=:.metadata.name --namespace storage"
- register: heketi_pod_name
- - name: Read heketi-client package version from Heketi POD
- shell: "oc exec --namespace storage {{ heketi_pod_name.stdout_lines[0]
- }} -- rpm -q heketi-client --queryformat{{ ''
- }} '%{version}-%{release}\n' | cut -d '.' -f 1,2,3"
- register: heketi_client_version
- - name: Enable Gluster 3 repo on master node
- import_role:
- name: enable-gluster-repo
- - name: Remove existing heketi client from master node if present
- package:
- name: "heketi-client"
- state: absent
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- - name: Install heketi client on master node for CNS and CRS needs
- package:
- name: "heketi-client-{{heketi_client_version.stdout_lines[0].strip() }}*"
- state: present
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
diff --git a/deployment/playbooks/cns-storage.yaml b/deployment/playbooks/cns-storage.yaml
deleted file mode 100644
index 6df9dbd7..00000000
--- a/deployment/playbooks/cns-storage.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- include: prod-ose-cns.yaml
- tags: ['vms']
-
-- include: cns-node-setup.yaml
-  tags: ['node-setup']
-
-- include: node-setup.yaml
-  tags: ['node-setup']
-
-- include: cns-setup.yaml
-  tags: ['node-setup']
-
-- include: cleanup-cns.yaml
- tags: ['clean']
diff --git a/deployment/playbooks/crs-node-setup.yaml b/deployment/playbooks/crs-node-setup.yaml
deleted file mode 100644
index c762f48a..00000000
--- a/deployment/playbooks/crs-node-setup.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
----
-- hosts: crs
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - instance-groups
- - package-repos
- - vmware-guest-setup
- - crs-prerequisite
- - gluster-ports
-
-# The 'openshift_node_groups' var became required starting with OCP 3.10
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - set_fact:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- hosts: crs
- gather_facts: no
- become: no
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Install required kernel modules on CRS nodes
- import_role:
- name: openshift_storage_glusterfs
- tasks_from: kernel_modules.yml
-
-- name: Map domain names and IP addresses of old and new nodes to each other
- hosts: master, compute, cns
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
-
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
-    - name: Make sure dnsmasq is running, enabled and restarted
- service: name=dnsmasq state=restarted enabled=yes
-
-- hosts: localhost
- gather_facts: no
- become: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
diff --git a/deployment/playbooks/crs-setup.yaml b/deployment/playbooks/crs-setup.yaml
deleted file mode 100644
index fbba5f37..00000000
--- a/deployment/playbooks/crs-setup.yaml
+++ /dev/null
@@ -1,209 +0,0 @@
----
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
- ) }} hosts=single_master"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- hosts: single_master
- tasks:
-    - name: Label common compute nodes as suitable for the Heketi POD
- oc_label:
- name: '{{ item }}'
- kind: 'node'
- state: 'add'
- labels:
- - key: 'glusterfs'
- value: 'heketi-host'
- - key: 'heketi'
- value: 'heketi-host'
- with_items: "{{ groups[cluster_id + '-compute'] }}"
- ignore_errors: true
-
-# Prepare SSH key pair before CRS installation
-- hosts: localhost
- ignore_errors: no
- tasks:
- - name: Get home dir of the current user
- shell: "getent passwd $(whoami) | cut -d: -f6"
- register: user_home_dir
- - name: Define path for the SSH key
- set_fact:
- crs_ssh_keypath: "{{ user_home_dir.stdout_lines[0].strip() }}/.ssh/crs_nodes_{{
- cluster_id + '_' + (999999999999999 | random | string ) }}"
- - name: Generate SSH key pair for Heketi and CRS interactions
- shell: "yes y| ssh-keygen -b 2048 -t rsa -f {{ crs_ssh_keypath }} -q -N ''"
- args:
- creates: "{{ crs_ssh_keypath }}"
- - name: Read contents of the public SSH key
- command: "cat {{ crs_ssh_keypath }}.pub"
- register: crs_pub_key_raw
- - name: Save public SSH key data to the variable
- set_fact:
- crs_pub_key: "{{ crs_pub_key_raw.stdout_lines[0].strip() }}"
- - name: Copy public SSH key onto CRS nodes
- shell: "echo {{ crs_pub_key }} >> /root/.ssh/authorized_keys"
- delegate_to: "{{ item }}"
- delegate_facts: true
- with_items: "{{ groups[cluster_id + '-crs'] }}"
- - name: Set var with SSH key path for master nodes
- set_fact:
- crs_ssh_keypath: "{{ crs_ssh_keypath }}"
- delegate_to: "{{ item }}"
- delegate_facts: true
- with_items: "{{ groups[cluster_id + '-master'] }}"
-
-# Run CRS installation
-- hosts: single_master
- tasks:
- - name: Perform actions on master node which are required to install CRS
- import_role:
- name: openshift_storage_glusterfs
- vars:
- openshift_storage_glusterfs_name: 'storage'
- openshift_storage_glusterfs_namespace: 'storage'
- openshift_storage_glusterfs_is_native: false
- openshift_storage_glusterfs_heketi_is_native: true
- openshift_storage_glusterfs_heketi_admin_key: "{{
- (dp_tool_heketi_admin_key.strip() != '') |
- ternary(dp_tool_heketi_admin_key.strip(), omit) }}"
- openshift_storage_glusterfs_heketi_user_key: "{{
- (dp_tool_heketi_user_key.strip() != '') |
- ternary(dp_tool_heketi_user_key.strip(), omit) }}"
- openshift_storage_glusterfs_storageclass: true
- openshift_storage_glusterfs_block_storageclass: true
- openshift_storage_glusterfs_s3_deploy: false
- openshift_storage_glusterfs_nodeselector: 'role=compute'
- openshift_storage_glusterfs_heketi_executor: 'ssh'
- openshift_storage_glusterfs_heketi_ssh_keyfile: "{{ crs_ssh_keypath }}"
- - name: Allow expansion of PVCs using the 'glusterfs' storageclass.
- oc_edit:
- kind: sc
- name: glusterfs-{{ glusterfs_name }}
- content:
- allowVolumeExpansion: true
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Get IP address of the node with router
- hosts: single_master
- tasks:
- - command: "oc get endpoints router -o=custom-columns=:.subsets[*].addresses[0].ip -n default"
- register: router_get
- - set_fact:
- router_ip: "{{ router_get.stdout_lines[1].strip() }}"
- delegate_to: "{{ item }}"
- delegate_facts: True
- with_items: "{{ groups['allnodes'] }}"
-
-- name: Restart dnsmasq on all the nodes to apply all the changes we made
- hosts: allnodes
- tasks:
- - lineinfile:
- path: /etc/dnsmasq.conf
- line: "address=/.{{ app_dns_prefix }}.{{ dns_zone }}/{{ router_ip }}"
- - service:
- name: dnsmasq
- state: restarted
-
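For illustration, with hypothetical values app_dns_prefix=apps, dns_zone=example.com and a router IP of 192.0.2.10, the lineinfile task above would render as:

  - lineinfile:
      path: /etc/dnsmasq.conf
      line: "address=/.apps.example.com/192.0.2.10"

which makes dnsmasq resolve every name under the wildcard application subdomain to the node hosting the router.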
-- hosts: single_master
- tasks:
- - name: Get IPv4 address of the main master node
- command: "python -c \"import yaml ;
- config = yaml.load(open('/etc/origin/master/master-config.yaml', 'r'));
- print(config['kubernetesMasterConfig']['masterIP'])
- \""
- register: master_ipv4
- - set_fact:
- master_ipv4: "{{ master_ipv4.stdout_lines[0] }}"
- - name: Read Heketi dc name
- shell: "oc get dc -n storage | grep heketi | awk '{ print $1}'"
- register: heketi_dc_name_raw
- - name: Read Heketi svc name
- shell: "oc get svc -n storage | grep -e heketi | grep -v heketi-db | awk '{ print $1}'"
- register: heketi_svc_name_raw
- - name: Save Heketi DC and SVC names in separate vars
- set_fact:
- heketi_dc_name: "{{ heketi_dc_name_raw.stdout.strip() }}"
- heketi_svc_name: "{{ heketi_svc_name_raw.stdout.strip() }}"
- - command: "oc patch svc {{ heketi_svc_name }} --namespace storage
- -p '{\"spec\":{\"externalIPs\":[\"{{ master_ipv4 }}\"]}}'"
- run_once: true
-
-# Following updates config file
-# which is required for automated tests from 'cns-automation' repo
-
-- name: Update 'cns-automation' config file
- hosts: localhost
- tasks:
- - set_fact:
- master_ipv4: "{{ hostvars[groups['single_master'][0]].master_ipv4 }}"
- - yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: openshift.storage_project_name
- value: "storage"
- - key: openshift.heketi_config.heketi_dc_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_dc_name }}"
- - key: openshift.heketi_config.heketi_service_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_svc_name }}"
- - key: openshift.heketi_config.heketi_client_node
- value: "{{ master_ipv4 }}"
- - key: openshift.heketi_config.heketi_server_url
- value: "http://{{ master_ipv4 }}:8080"
- - key: openshift.heketi_config.heketi_cli_user
- value: 'admin'
- - key: openshift.heketi_config.heketi_cli_key
- value: "{{ dp_tool_heketi_admin_key }}"
- - key: openshift.dynamic_provisioning.storage_classes
- value:
- file_storage_class:
- provisioner: "kubernetes.io/glusterfs"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- secretnamespace: "storage"
- volumenameprefix: "autotests-file"
- block_storage_class:
- provisioner: "gluster.org/glusterblock"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- restsecretnamespace: "storage"
- volumenameprefix: "autotests-block"
- hacount: "3"
- chapauthenabled: "true"
- when: cns_automation_config_file_path | length > 0
- run_once: true
-
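As a sketch, the yedit edits above leave the 'cns-automation' config file with a fragment like the following; the addresses, secret and Heketi DC/SVC names are hypothetical placeholders for whatever the lookups above return, and the openshift.dynamic_provisioning.storage_classes section is omitted for brevity:

  openshift:
    storage_project_name: storage
    heketi_config:
      heketi_dc_name: heketi-storage       # hypothetical lookup result
      heketi_service_name: heketi-storage  # hypothetical lookup result
      heketi_client_node: 192.0.2.1        # master_ipv4
      heketi_server_url: "http://192.0.2.1:8080"
      heketi_cli_user: admin
      heketi_cli_key: secret               # dp_tool_heketi_admin_key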
-- name: Install 'heketi-client' package on all the master nodes
- hosts: master
- gather_facts: no
- tasks:
- - name: Get Heketi POD name on master node
- command: "oc get pods -l glusterfs=heketi-storage-pod --no-headers {{ ''
- }}-o=custom-columns=:.metadata.name --namespace storage"
- register: heketi_pod_name
- - name: Read heketi-client package version from Heketi POD
- shell: "oc exec --namespace storage {{ heketi_pod_name.stdout_lines[0]
- }} -- rpm -q heketi-client --queryformat{{ ''
- }} '%{version}-%{release}\n' | cut -d '.' -f 1,2,3"
- register: heketi_client_version
- - name: Enable Gluster 3 repo on master node
- import_role:
- name: enable-gluster-repo
- - name: Remove existing heketi client from master node if present
- package:
- name: "heketi-client"
- state: absent
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- - name: Install heketi client on master node for CNS and CRS needs
- package:
- name: "heketi-client-{{ heketi_client_version.stdout_lines[0].strip() }}*"
- state: present
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
diff --git a/deployment/playbooks/crs-storage.yaml b/deployment/playbooks/crs-storage.yaml
deleted file mode 100644
index cee0da69..00000000
--- a/deployment/playbooks/crs-storage.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- include: prod-ose-crs.yaml
- tags: ['vms']
-
-- include: crs-node-setup.yaml
- tags: [ 'node-setup' ]
-
-- include: crs-setup.yaml
- tags: [ 'node-setup']
-
-- include: cleanup-crs.yaml
- tags: ['clean']
diff --git a/deployment/playbooks/gather_logs.yaml b/deployment/playbooks/gather_logs.yaml
deleted file mode 100644
index 33b9114f..00000000
--- a/deployment/playbooks/gather_logs.yaml
+++ /dev/null
@@ -1,883 +0,0 @@
-# Expected vars:
-#
-# - 'config_filepath' - required. It must store the filepath of the
-#   config file used by the automated test cases. The playbook takes
-#   all the node info from it.
-#
-# - 'output_artifacts_dir' - optional. The directory where the
-#   generated/gathered files should be saved.
-#
-# Command to run this playbook:
-#
-# $ tox -e ocp3.11 -- ansible-playbook -i 127.0.0.1, \
-# playbooks/gather_logs.yaml \
-# -e config_filepath=/path/to/the/cns-automation-config.yaml \
-# -e output_artifacts_dir=../cluster_logs/
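For orientation, the config file consumed here must contain at least the keys read by the parsing task below; a minimal sketch with hypothetical addresses and secret:

  ocp_servers:
    master:
      192.0.2.1: {}
    nodes:
      192.0.2.2: {}
  gluster_servers:
    192.0.2.3: {}
  openshift:
    heketi_config:
      heketi_server_url: "http://192.0.2.1:8080"
      heketi_cli_user: admin
      heketi_cli_key: secret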
-
----
-- hosts: localhost
- connection: local
- gather_facts: no
- tasks:
- - name: Process config file and find all the nodes of an OpenShift cluster
- command: "python -c \"import yaml ;
- config = yaml.load(open('{{ config_filepath }}', 'r'));
- print(' '.join(list(config['ocp_servers']['master'].keys())));
- print(' '.join(list(config['ocp_servers']['nodes'].keys())));
- print(' '.join(list(config['gluster_servers'].keys())));
- print(config.get('openshift', config.get('cns',
- {}))['heketi_config']['heketi_server_url']);
- print(config.get('openshift', config.get('cns',
- {}))['heketi_config']['heketi_cli_user']);
- print(config.get('openshift', config.get('cns',
- {}))['heketi_config']['heketi_cli_key'])\""
- register: config_data
- - debug:
- msg: "{{ config_data }}"
- - name: Save config data to the host vars
- set_fact:
- master_nodes: "{{ config_data.stdout_lines[0].split(' ') }}"
- compute_nodes: "{{ config_data.stdout_lines[1].split(' ') }}"
- gluster_nodes: "{{ config_data.stdout_lines[2].split(' ') }}"
- heketi_server_url: "{{ config_data.stdout_lines[3] }}"
- heketi_cli_user: "{{ config_data.stdout_lines[4] }}"
- heketi_cli_key: "{{ config_data.stdout_lines[5] }}"
- - name: Print list of master nodes IP addresses
- debug:
- msg: "{{ master_nodes }}"
- - name: Print list of compute nodes IP addresses
- debug:
- msg: "{{ compute_nodes }}"
- - name: Print list of gluster nodes IP addresses
- debug:
- msg: "{{ gluster_nodes }}"
- - name: Add gathered master IP addresses to the Ansible host list
- add_host:
- hostname: "{{ item }}"
- ansible_host: "{{ item }}"
- groups: logs_ocp_nodes, logs_single_master_node, logs_master_nodes
- with_items: "{{ master_nodes }}"
- - name: Add gathered compute IP addresses to the Ansible host list
- add_host:
- hostname: "{{ item }}"
- ansible_host: "{{ item }}"
- groups: logs_ocp_nodes, logs_compute_nodes
- with_items: "{{ compute_nodes }}"
- - name: Add gathered gluster IP addresses to the Ansible host list
- add_host:
- hostname: "{{ item }}"
- ansible_host: "{{ item }}"
- groups: logs_ocp_nodes, logs_gluster_nodes
- with_items: "{{ gluster_nodes }}"
-
-- hosts: all
- gather_facts: no
- tasks:
- - set_fact:
- output_artifacts_dir: "{{
- (output_artifacts_dir | default('../cluster_logs/')) }}"
- - set_fact:
- output_artifacts_dir: "{{ output_artifacts_dir
- }}{% if output_artifacts_dir[-1] != '/' %}{{ '/' }}{% endif %}"
- - name: Get hostname of the current host
- shell: "hostname"
- register: hostname
- - name: Save hostname to the var
- set_fact:
- current_hostname: "{{ hostname.stdout_lines[0].strip() }}"
- separator_line: "{{ '=' * 79 }}"
- - name: Create output artifacts directory if absent
- delegate_to: localhost
- file:
- path: "{{ output_artifacts_dir }}"
- state: directory
- run_once: yes
- - name: Install 'rsync' package which is required by 'synchronize' module
- yum:
- name: rsync
- state: installed
- ignore_errors: yes
-
-# All nodes SOS reports
-- hosts: logs_ocp_nodes
- gather_facts: no
- tasks:
- - name: Install 'sos' package if absent
- package:
- name: sos
- state: present
- - name: Run sosreport command
- shell: "sosreport --batch --verbose --tmp-dir . --label customuniquelabel"
- register: sosreport_output
- - name: Print the output of the sosreport command
- debug:
- msg: "{{ sosreport_output }}"
- - name: Get name of the generated sos-file
- shell: 'echo -e "{{ sosreport_output.stdout }}" | grep customuniquelabel'
- register: sos_filepath
- - name: Copy generated files to the localhost
- fetch:
- src: "{{ sos_filepath.stdout_lines[0].strip() }}"
- dest: "{{ output_artifacts_dir }}sosreports/"
- flat: yes
- fail_on_missing: yes
-
-- hosts: logs_single_master_node
- gather_facts: no
- vars:
- master_package_list:
- - docker
- - heketi
- master_service_list:
- - docker
- - multipathd
- heketi_pod_package_list:
- - gluster
- - heketi
- gluster_pod_package_list:
- - gluster
- - heketi
- - targetcli
- - gluster-block
- - tcmu-runner
- - python-configshell
- - python-rtslib
- gluster_pod_service_list:
- - glusterd
- - heketi
- - gluster-blockd
- - gluster-block-target
- - tcmu-runner
- - rpcbind
- tasks:
-
- # Master node info
- - name: Get Linux kernel version of the master node
- shell: "uname -a"
- register: master_linux_kernel_version
- - name: Get Red Hat release info
- shell: "cat /etc/redhat-release"
- register: master_rh_release
- - name: Create grep filter with all the packages we are interested in
- set_fact:
- package_filter: "{{ package_filter | default('grep') + ' -e ' + item }}"
- with_items: "{{ master_package_list }}"
- - name: Get list of installed packages we are interested in
- shell: "rpm -qa | {{ package_filter }}"
- register: master_packages
- - name: Get status of services on OCP Master node
- shell: "systemctl list-units {{ master_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend"
- register: master_services
- - name: Get OpenShift client version
- shell: "(oc version | grep -e 'oc ' -e 'openshift' -e 'kube') ||
- echo failed_to_get_oc_version_info"
- register: master_oc_version
- - name: Get list of OCP nodes
- shell: "oc get nodes -o wide || echo failed_to_get_list_of_nodes"
- register: master_ocp_nodes
- - name: Get info about all the docker images used in OCP cluster
- shell: "(oc get pods --all-namespaces
- -o=custom-columns=:.status.containerStatuses[*].image | grep -v -e '^$' | uniq) ||
- echo failed_to_get_list_of_images"
- register: master_image_info
- - name: Write master data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ output_artifacts_dir }}master_data.yaml"
- state: present
- edits:
- - key: master
- value:
- Linux kernel version: "{{ master_linux_kernel_version.stdout_lines }}"
- Red Hat release info: "{{ master_rh_release.stdout_lines }}"
- List of Packages: "{{ master_packages.stdout_lines }}"
- List of services: "{{ master_services.stdout_lines }}"
- OC Version: "{{ master_oc_version.stdout_lines }}"
- OCP nodes: "{{ master_ocp_nodes.stdout_lines }}"
- Images info: "{{ master_image_info.stdout_lines }}"
-
- # Heketi POD logs, config and DB dump
- - name: Get heketi POD
- shell: "oc get pods --all-namespaces -l heketi --no-headers
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: heketi_pods
- retries: 10
- delay: 6
- until: heketi_pods is succeeded
- - name: DEBUG HEKETI
- debug:
- msg: "{{ heketi_pods }}"
- - block:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}heketi_pod/"
- - name: Create compute directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- - name: Set Heketi POD name and Heketi namespace as vars
- set_fact:
- heketi_pod_name: "{{
- (heketi_pods.stdout_lines[0].split(' ') | list)[0] }}"
- heketi_pod_namespace: "{{
- (heketi_pods.stdout_lines[0].split(' ') | list)[-1] }}"
- - name: Set Heketi pod command prefix
- set_fact:
- heketi_pod_cmd_prefix: "oc exec {{ heketi_pod_name
- }} --namespace {{ heketi_pod_namespace }} --"
- - name: Get the Heketi config from the Heketi POD
- shell: 'echo -e "$({{ heketi_pod_cmd_prefix
- }} cat /etc/heketi/heketi.json ||
- echo error_failed_to_get_the_heketi_config_file)" |
- tee /tmp/heketi_config.json'
- - name: Copy the Heketi config
- fetch:
- src: "/tmp/heketi_config.json"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- - name: Save Heketi POD logs
- shell: "(oc logs {{ heketi_pod_name }} ||
- echo 'ERROR! Failed to get the Heketi logs.') > /tmp/heketi.log"
- - name: Copy the Heketi logs
- fetch:
- src: "/tmp/heketi.log"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- - name: Dump the Heketi DB
- shell: 'echo -e "$({{ heketi_pod_cmd_prefix }} heketi-cli --server {{
- hostvars["localhost"]["heketi_server_url"] }} --user {{
- hostvars["localhost"]["heketi_cli_user"] }} --secret {{
- hostvars["localhost"]["heketi_cli_key"]
- }} db dump ||
- echo \{\"error\"\:\"failed_to_get_the_Heketi_db_dump\"\})" |
- python -m json.tool > /tmp/heketi_db_dump.json'
- - name: Copy the Heketi DB dump
- fetch:
- src: "/tmp/heketi_db_dump.json"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- - name: Get storage release version from Heketi POD
- shell: "{{ heketi_pod_cmd_prefix }} cat /etc/redhat-storage-release ||
- echo failed_to_read_redhat_storage_release_info"
- register: heketi_pod_storage_release_version
- - name: Get info about packages on Heketi POD
- shell: "{{ heketi_pod_cmd_prefix }} rpm -qa | grep -e {{
- heketi_pod_package_list | join(' -e ') }} ||
- echo failed_to_read_list_of_installed_packages"
- register: heketi_pod_packages
- - name: Write Heketi data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ dir_path }}heketi_pod_data.yaml"
- state: present
- edits:
- - key: "Storage_release_version"
- value: "{{ heketi_pod_storage_release_version.stdout_lines }}"
- - key: "List_of_Packages"
- value: "{{ heketi_pod_packages.stdout_lines }}"
- when: "((heketi_pods.stdout_lines | join('')).strip() | length) > 0"
-
- # Gluster PODs
- - name: Get list of Gluster PODs
- shell: "oc get pods --all-namespaces -l glusterfs-node --no-headers
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: gluster_pods
- retries: 10
- delay: 6
- until: gluster_pods is succeeded
- - name: DEBUG GLUSTER
- debug:
- msg: "{{ gluster_pods }}"
- - name: Create var describing the Gluster cluster deployment type
- set_fact:
- is_gluster_containerized: "{{
- ((gluster_pods.stdout_lines | join('')).strip() | length) > 0 }}"
- delegate_to: "{{ item }}"
- delegate_facts: yes
- with_items: "{{ groups['all'] }}"
- - block:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}gluster_pods/"
- - name: Create directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- - name: Define storage namespace
- set_fact:
- storage_namespace: "{{ (gluster_pods.stdout_lines[0].split(
- ' ') | list)[-1] }}"
- - name: Define gluster POD names
- set_fact:
- gluster_pod_names: "{{ (gluster_pod_names | default([])) +
- [(item.split(' ') | list)[0]] }}"
- with_items: "{{ gluster_pods.stdout_lines[0:] }}"
- - debug:
- msg: "{{ gluster_pod_names }}"
-
- - name: Get storage release version from Gluster PODs
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- cat /etc/redhat-storage-release) ||
- echo failed_to_get_redhat_storage_release_info"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_storage_release_version_results
- - name: Process gluster PODs storage release versions results
- set_fact:
- gluster_pod_storage_release_version_processed: "{{
- gluster_pod_storage_release_version_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_storage_release_version_results.results }}"
- - name: Get info about packages on Gluster PODs
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- rpm -qa | grep -e {{ gluster_pod_package_list | join(' -e ') }}) ||
- echo failed_to_get_packages_info_from_gluster_pod"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_package_list_results
- - name: Process gluster PODs package lists results
- set_fact:
- gluster_pod_package_list_processed: "{{
- gluster_pod_package_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_package_list_results.results }}"
- - name: Get info about services on Gluster PODs
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- systemctl list-units {{
- gluster_pod_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend) ||
- echo failed_to_get_services_info_from_gluster_pod"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_service_list_results
- - name: Process gluster PODs service lists results
- set_fact:
- gluster_pod_service_list_processed: "{{
- gluster_pod_service_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_service_list_results.results }}"
- - name: Write Gluster PODs data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ dir_path }}gluster_pods_packages_and_services_data.yaml"
- state: present
- edits:
- - key: gluster_pods
- value:
- Storage release version: "{{
- gluster_pod_storage_release_version_processed }}"
- List of Packages: "{{ gluster_pod_package_list_processed }}"
- List of Services: "{{ gluster_pod_service_list_processed }}"
-
- - name: Get 'targetcli ls' output
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- targetcli ls) || echo failed_to_get_targetcli_ls_output"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_targetcli_ls_results
- - debug:
- msg: "{{ gluster_pod_targetcli_ls_results }}"
- - name: Write Gluster PODs data to the data file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-targetcli-ls"
- with_items: "{{ gluster_pod_targetcli_ls_results.results }}"
-
- - name: Create gluster log directories on the master node
- file:
- path: "/tmp/gluster/{{ item }}-var_log_glusterfs"
- state: directory
- with_items: "{{ gluster_pod_names }}"
- - name: Copy '/var/log/glusterfs/*' files to the master
- shell: "(oc cp {{ storage_namespace }}/{{ item }}:/var/log/glusterfs
- /tmp/gluster/{{ item }}-var_log_glusterfs) ||
- echo failed_to_copy_var_log_glusterfs_files"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy '/var/log/glusterfs/*' files from the master to the localhost
- synchronize:
- src: "/tmp/gluster/{{ item }}-var_log_glusterfs/"
- dest: "{{ dir_path }}{{ item }}-var_log_glusterfs/"
- mode: pull
- recursive: yes
- use_ssh_args: yes
- with_items: "{{ gluster_pod_names }}"
- ignore_errors: yes
-
- - name: Get 'dmesg -T' info
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- dmesg -T) || echo failed_to_get_dmesg_-T_info"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pods_dmesg_results
- - name: Write Gluster PODs dmesg data to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-dmesg"
- with_items: "{{ gluster_pods_dmesg_results.results }}"
-
- - name: Get list of processes and info for processes with 'D' stat
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} -- ps aux ;
- oc exec {{ item }} --namespace {{ storage_namespace }} --
- ps -aux | tee /dev/tty | awk {'if ( $8 ~ \"D\" ) print $2'} |
- while read -r pid ;
- do echo -e \"\nRunning '/proc/$pid/stack' command:\";
- cat /proc/$pid/stack ;
- done) || echo failed_to_get_info_about_processes_with_D_stat"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_io_processes_info
- - name: Write Gluster PODs 'I/O' processes info to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-ps"
- with_items: "{{ gluster_pod_io_processes_info.results }}"
-
- - name: List dirs and files in '/sys/module/dm_multipath'
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- ls -l /sys/module/dm_multipath) ||
- echo failed_to_list_files_in__sys_module_dm_multipath"
- with_items: "{{ gluster_pod_names }}"
- register: sys_module_dm_multipath_results
- - name: Write Gluster PODs 'ls -l /sys/module/dm_multipath' output to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0]
- }}-ls-sys_module_dm_multipath"
- with_items: "{{ sys_module_dm_multipath_results.results }}"
-
- - name: "Run 'lsmod | egrep target_core' command"
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- lsmod | egrep target_core) ||
- echo failed_to_get_lsmod_info_for_target_core"
- with_items: "{{ gluster_pod_names }}"
- register: lsmod_target_core_results
- - name: Write Gluster PODs 'lsmod | egrep target_core' command results to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{
- (item.item.strip().split(' ') | list)[0] }}-lsmod_target_core"
- with_items: "{{ lsmod_target_core_results.results }}"
-
- - name: Get info about devices
- shell: '(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- bash -c "echo -e \"{{ separator_line }}\nlsblk info:\"; lsblk;
- echo -e \"{{ separator_line }}\nPVs info:\"; pvs;
- echo -e \"{{ separator_line }}\nVGs info:\"; vgs;
- echo -e \"{{ separator_line }}\nLVs info:\"; lvs;
- echo -e \"{{ separator_line }}\"") ||
- echo failed_to_get_list_of_pvs_vgs_and_lvs'
- with_items: "{{ gluster_pod_names }}"
- register: lsblk_pvs_vgs_lvs_results
- - name: Write Gluster PODs info about PVs, VGs and LVs to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0]
- }}-lsblk_pvs_vgs_lvs"
- with_items: "{{ lsblk_pvs_vgs_lvs_results.results }}"
-
- - name: Read 'journalctl' output
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- journalctl) || echo failed_to_read_journalctl_output"
- with_items: "{{ gluster_pod_names }}"
- register: journalctl_results
- - name: Write Gluster PODs 'journalctl' output to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0]
- }}-journalctl"
- with_items: "{{ journalctl_results.results }}"
-
- - name: Read 'mount' output
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- mount) || echo failed_to_read_mount_output"
- with_items: "{{ gluster_pod_names }}"
- register: mount_results
- - name: Write Gluster PODs 'mount' output to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-mount"
- with_items: "{{ mount_results.results }}"
-
- - name: Create archive from '/etc/target/' dir
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- tar -czvf etc_target.tar.gz /etc/target/ ) ||
- echo failed_to_archive__etc_target_dir"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/etc/target/' dir to the master
- shell: "(oc cp {{ storage_namespace }}/{{ item }}:/etc_target.tar.gz
- /tmp/gluster/{{ item }}-etc_target.tar.gz) ||
- echo failed_to_copy_etc_target_file"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/etc/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ item }}-etc_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- with_items: "{{ gluster_pod_names }}"
-
- - name: Create archive from '/sys/kernel/config/target/' dir
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- tar -czvf sys_kernel_config_target.tar.gz /sys/kernel/config/target/ ) ||
- echo failed_to_archive__sys_kernel_config_target_dir"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/sys/kernel/config/target/' dir to the master
- shell: "(oc cp {{ storage_namespace }}/{{ item }}:/sys_kernel_config_target.tar.gz
- /tmp/gluster/{{ item }}-sys_kernel_config_target.tar.gz) ||
- echo failed_to_copy_sys_kernel_config_target_file"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/sys/kernel/config/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ item }}-sys_kernel_config_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- with_items: "{{ gluster_pod_names }}"
- when: "is_gluster_containerized"
-
-# Gather info from gluster nodes in case of 'standalone' deployment
-- hosts: logs_gluster_nodes
- gather_facts: no
- vars:
- gluster_package_list:
- - gluster
- - heketi
- - targetcli
- - gluster-block
- - tcmu-runner
- - python-configshell
- - python-rtslib
- gluster_service_list:
- - glusterd
- - heketi
- - gluster-blockd
- - gluster-block-target
- - tcmu-runner
- - rpcbind
- tasks:
- - block:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}gluster_nodes/"
- - name: Create directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- run_once: yes
-
- - name: Get storage release version from Gluster node
- shell: "(cat /etc/redhat-storage-release) ||
- echo failed_to_get_redhat_storage_release_info"
- register: gluster_node_storage_release_version_results
- - name: Get info about packages on Gluster node
- shell: "(rpm -qa | grep -e {{ gluster_package_list | join(' -e ') }}) ||
- echo failed_to_get_packages_info_from_gluster_node"
- register: gluster_node_package_list_results
- - name: Get info about services on Gluster node
- shell: "(systemctl list-units {{
- gluster_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend) ||
- echo failed_to_get_services_info_from_gluster_node"
- register: gluster_node_service_list_results
- - name: Write Gluster node data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ dir_path }}gluster_nodes_packages_and_services_data.yaml"
- state: present
- edits:
- - key: "gluster_node_{{ current_hostname }}"
- value:
- Storage release version: "{{
- gluster_node_storage_release_version_results.stdout }}"
- List of Packages: "{{ gluster_node_package_list_results.stdout_lines }}"
- List of Services: "{{ gluster_node_service_list_results.stdout_lines }}"
-
- - name: Get 'targetcli ls' output
- shell: "targetcli ls || echo failed_to_get_targetcli_ls_output"
- register: gluster_node_targetcli_ls_results
- - name: Write Gluster node data to the data file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ gluster_node_targetcli_ls_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-targetcli-ls"
-
- - name: Copy '/var/log/glusterfs/*' files from the current gluster node
- synchronize:
- src: "/var/log/glusterfs/"
- dest: "{{ dir_path }}{{ current_hostname }}-var_log_glusterfs/"
- mode: pull
- recursive: yes
- use_ssh_args: yes
- ignore_errors: yes
-
- - name: Get info about space usage
- shell: '(echo -e "File system disk space usage on the {{
- current_hostname }} node:\n"; df -Th) || echo failed_to_get_df_info'
- register: df_info
- - name: Save mounts info into a file
- delegate_to: localhost
- copy:
- content: "{{ df_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_df"
-
- - name: Get 'dmesg -T' info
- shell: "dmesg -T || echo failed_to_get_dmesg_-T_info"
- register: gluster_node_dmesg_results
- - name: Write Gluster node dmesg data to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ gluster_node_dmesg_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-dmesg"
-
- - name: Get list of processes and info for processes with 'D' stat
- shell: "( ps aux ;
- ps -aux | tee /dev/tty | awk {'if ( $8 ~ \"D\" ) print $2'} |
- while read -r pid ;
- do echo -e \"\nRunning '/proc/$pid/stack' command:\";
- cat /proc/$pid/stack ;
- done) || echo failed_to_get_info_about_processes_with_D_stat"
- register: gluster_node_io_processes_info
- - name: Write Gluster node 'I/O' processes info to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ gluster_node_io_processes_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-ps"
-
- - name: List dirs and files in '/sys/module/dm_multipath'
- shell: "ls -l /sys/module/dm_multipath ||
- echo failed_to_list_files_in__sys_module_dm_multipath"
- register: sys_module_dm_multipath_results
- - name: Write Gluster node 'ls -l /sys/module/dm_multipath' output to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ sys_module_dm_multipath_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-ls-sys_module_dm_multipath"
-
- - name: "Run 'lsmod | egrep target_core' command"
- shell: "(lsmod | egrep target_core) ||
- echo failed_to_get_lsmod_info_for_target_core"
- register: lsmod_target_core_results
- - name: Write Gluster node 'lsmod | egrep target_core' command results to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ lsmod_target_core_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-lsmod_target_core"
-
- - name: Get info about devices
- shell: '(echo -e "{{ separator_line }}\nlsblk info:"; lsblk;
- echo -e "{{ separator_line }}\nPVs info:"; pvs;
- echo -e "{{ separator_line }}\nVGs info:"; vgs;
- echo -e "{{ separator_line }}\nLVs info:"; lvs;
- echo -e "{{ separator_line }}\nls -l /dev/disk/by-path/ip-*:";
- ls -l /dev/disk/by-path/ip-*; echo {{ separator_line }}) ||
- echo failed_to_get_info'
- register: lsblk_pvs_vgs_lvs
- - name: Save devices info into a file
- delegate_to: localhost
- copy:
- content: "{{ lsblk_pvs_vgs_lvs.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_lsblk_pvs_vgs_lvs"
-
- - name: Read 'journalctl' output
- shell: "journalctl || echo failed_to_read_journalctl_output"
- register: journalctl_results
- - name: Write Gluster node 'journalctl' output to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ journalctl_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-journalctl"
-
- - name: Read 'mount' output
- shell: "mount || echo failed_to_read_mount_output"
- register: mount_results
- - name: Write Gluster node 'mount' output to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ mount_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-mount"
-
- - name: Create archive from '/etc/target/' dir
- shell: "tar -czvf /tmp/gluster/{{ current_hostname
- }}-etc_target.tar.gz /etc/target/ ||
- echo failed_to_archive__etc_target_dir"
- - name: Copy archive of the '/etc/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ current_hostname }}-etc_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
-
- - name: Create archive from '/sys/kernel/config/target/' dir
- shell: "tar -czvf /tmp/gluster/{{ current_hostname
- }}-sys_kernel_config_target.tar.gz /sys/kernel/config/target/ ||
- echo failed_to_archive__sys_kernel_config_target_dir"
- - name: Copy archive of the '/sys/kernel/config/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ current_hostname }}-sys_kernel_config_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
-
- - name: Create archive from the '/var/log/messages' file
- shell: "tar -czvf var_log_messages.tar.gz /var/log/messages"
- retries: 15
- delay: 2
- register: result
- until: result is succeeded
- ignore_errors: yes
- - name: Copy the archive to the localhost
- fetch:
- src: "var_log_messages.tar.gz"
- dest: "{{ dir_path }}{{ current_hostname }}_var_log_messages.tar.gz"
- flat: yes
- fail_on_missing: yes
- ignore_errors: yes
- when: "not is_gluster_containerized"
-
-# Gather info from compute nodes
-- hosts: logs_compute_nodes
- gather_facts: no
- vars:
- compute_package_list:
- - docker
- - heketi
- - rpcbind
- compute_service_list:
- - docker
- - multipathd
- - rpcbind
- - iscsid
- tasks:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}compute_nodes/"
- - name: Create compute directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- run_once: yes
-
- - name: Create grep filter with all the packages we are interested in
- set_fact:
- package_filter: "{{ package_filter | default('grep') + ' -e ' + item }}"
- with_items: "{{ compute_package_list }}"
- - name: Get list of installed packages we are interested in
- shell: "rpm -qa | {{ package_filter }} || echo failed_to_get_info"
- register: compute_packages
- - name: Write compute nodes package list to a file
- delegate_to: localhost
- copy:
- content: "{{ compute_packages.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_packages.yaml"
-
- - name: Get status of services on OCP Compute nodes
- shell: "systemctl list-units {{
- compute_service_list | join('.service ') }}.service --no-pager
- --type=service --all --no-legend || echo failed_to_get_info"
- register: compute_services
- - name: Write compute nodes service list to a file
- delegate_to: localhost
- copy:
- content: "{{ compute_services.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_services.yaml"
-
- - name: Get multipath info
- shell: "(echo 'Multipath config is following:'; cat /etc/multipath.conf;
- echo -e \"{{ separator_line }}\nResults of 'multipath -ll' command:\";
- multipath -ll; echo {{ separator_line }}) || echo failed_to_get_info"
- register: multipath_info
- - name: Save multipath info into a file
- delegate_to: localhost
- copy:
- content: "{{ multipath_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_multipath_info"
-
- - name: Get info about devices
- shell: '(echo -e "{{ separator_line }}\nlsblk info:"; lsblk;
- echo -e "{{ separator_line }}\nPVs info:"; pvs;
- echo -e "{{ separator_line }}\nVGs info:"; vgs;
- echo -e "{{ separator_line }}\nLVs info:"; lvs;
- echo -e "{{ separator_line }}\nls -l /dev/disk/by-path/ip-*:";
- ls -l /dev/disk/by-path/ip-*; echo {{ separator_line }}) ||
- echo failed_to_get_info'
- register: lsblk_pvs_vgs_lvs
- - name: Save devices info into a file
- delegate_to: localhost
- copy:
- content: "{{ lsblk_pvs_vgs_lvs.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_lsblk_pvs_vgs_lvs"
-
- - name: Get info about mounts
- shell: '(echo -e "Mounts on the {{ current_hostname }} node:\n"; mount) ||
- echo failed_to_get_info'
- register: mounts
- - name: Save mounts info into a file
- delegate_to: localhost
- copy:
- content: "{{ mounts.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_mount"
-
- - name: Get info about space usage
- shell: '(echo -e "File system disk space usage on the {{
- current_hostname }} node:\n"; df -Th) || echo failed_to_get_df_info'
- register: df_info
- - name: Save mounts info into a file
- delegate_to: localhost
- copy:
- content: "{{ df_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_df"
-
- - name: Read 'dmesg -T' info
- shell: "dmesg -T || echo failed_to_get_info"
- register: dmesg_info
- - name: Save dmesg info into a file
- delegate_to: localhost
- copy:
- content: "{{ dmesg_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_dmesg"
-
- - name: Create archive from the '/var/log/messages' file
- shell: "tar -czvf var_log_messages.tar.gz /var/log/messages"
- retries: 15
- delay: 2
- register: result
- until: result is succeeded
- ignore_errors: yes
- - name: Copy the archive to the localhost
- fetch:
- src: "var_log_messages.tar.gz"
- dest: "{{ dir_path }}{{ current_hostname }}_var_log_messages.tar.gz"
- flat: yes
- fail_on_missing: yes
- ignore_errors: yes
diff --git a/deployment/playbooks/generate-tests-config.yaml b/deployment/playbooks/generate-tests-config.yaml
deleted file mode 100644
index a4f77f9f..00000000
--- a/deployment/playbooks/generate-tests-config.yaml
+++ /dev/null
@@ -1,140 +0,0 @@
-# Run this playbook this way:
-#
-# $ tox -e ocp3.11 -- ansible-playbook -i ocp-master-node-hostname-or-ip, \
-# playbooks/generate-tests-config.yaml \
-# -e output_filepath=tests-config.yaml \
-# -e additional_devices=/dev/sdf
-#
-# Supported vars:
-# - output_filepath: optional. Defines the path of the output tests config file.
-# - additional_devices: optional. Device names like "/dev/sdf", separated by
-#   commas. These should be bare devices that can be attached to a Heketi
-#   cluster. If not specified or empty, the appropriate test cases will be
-#   skipped.
-# - master_ip: optional. Will be used as master node IP address if provided.
-#
-# Requirements:
-# - 'yedit' module should be enabled. It is enabled when this playbook is
-#   run via the 'tox' command.
-# - playbook should run only on one host - OpenShift master node.
-#
-# Notes:
-# - tox's env name can be any of the following:
-#   'ocp3.6', 'ocp3.7', 'ocp3.9', 'ocp3.10' or 'ocp3.11'. The only criterion
-#   is that the 'yedit' module be enabled, which holds for all of those envs.
-
----
-- hosts: all[0]
- gather_facts: no
- tasks:
- - name: Read full hostname of the master node
- shell: "hostname -f"
- register: master_hostname_raw
- - name: Init vars
- set_fact:
- master_hostname: "{{ master_hostname_raw.stdout.strip() }}"
- master_ip: "{{ master_ip | default(hostvars.keys()[0]) }}"
- output_filepath: "{{
- output_filepath | default('../tests-config.yaml') }}"
- additional_devices_list: []
- - name: Process specified additional devices
- set_fact:
- additional_devices_list: "{{ additional_devices_list + [item] }}"
- with_items: "{{ (additional_devices | default('')).split(',') }}"
- when: item.strip() != ''
-
- - name: Read namespace of a Heketi deployment config
- shell: "oc get dc --all-namespaces | grep -e heketi | grep -v registry | awk '{ print $1}'"
- register: heketi_namespace_raw
- - name: Save namespace name in a separate var
- set_fact:
- heketi_namespace: "{{ heketi_namespace_raw.stdout.strip() }}"
- - name: Read Heketi dc name
- shell: "oc get dc -n {{ heketi_namespace
- }} | grep heketi | awk '{ print $1}'"
- register: heketi_dc_name_raw
- - name: Read Heketi svc name
- shell: "oc get svc -n {{ heketi_namespace
- }} | grep -e heketi | grep -v heketi-db | awk '{ print $1}'"
- register: heketi_svc_name_raw
- - name: Save Heketi DC and SVC names in separate vars
- set_fact:
- heketi_dc_name: "{{ heketi_dc_name_raw.stdout.strip() }}"
- heketi_svc_name: "{{ heketi_svc_name_raw.stdout.strip() }}"
- - name: Read Heketi service IP address
- shell: "oc get svc {{ heketi_svc_name }} -n {{ heketi_namespace
- }} -o=custom-columns=:.spec.clusterIP --no-headers"
- register: heketi_ip_raw
- - name: Read Heketi admin's secret
- shell: oc get dc -n {{ heketi_namespace }} {{ heketi_dc_name
- }} -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name==
- "HEKETI_ADMIN_KEY")].value}'
- register: heketi_admin_secret_raw
- - name: Save Heketi admin secret to a var
- set_fact:
- heketi_admin_secret: "{{ heketi_admin_secret_raw.stdout.strip() }}"
- - name: Read Gluster nodes addresses
- shell: "heketi-cli --server=http://{{
- heketi_ip_raw.stdout.strip() }}:8080 --user=admin --secret={{
- heketi_admin_secret }} topology info --json |
- python -c \"from __future__ import print_function; import sys, json;
- topology = json.loads(sys.stdin.readlines()[0]);
- ips = [(n['hostnames']['manage'][0], n['hostnames']['storage'][0])
- for n in topology['clusters'][0]['nodes']];
- [print(ip[0], ip[1]) for ip in ips]\""
- register: gluster_nodes_addresses_raw
- - name: Process gluster data and save it to a var
- set_fact:
- gluster_servers_data: "{{ gluster_servers_data | default({}) | combine(
- {item.split(' ')[1]: {
- 'manage': item.split(' ')[0], 'storage': item.split(' ')[1],
- 'additional_devices': additional_devices_list,
- }}
- )}}"
- with_items: "{{ gluster_nodes_addresses_raw.stdout_lines }}"
-
- - name: Save all the calculated data to the tests config file
- delegate_to: localhost
- connection: local
- yedit:
- src: "{{ output_filepath }}"
- state: present
- backup: false
- edits:
- - key: common
- value:
- stop_on_first_failure: false
- - key: gluster_servers
- value: "{{ gluster_servers_data }}"
- - key: ocp_servers.client
- value: "{{ {master_ip: {'hostname': master_hostname}} }}"
- - key: ocp_servers.master
- value: "{{ {master_ip: {'hostname': master_hostname}} }}"
- - key: ocp_servers.nodes
- value: {}
- - key: openshift.storage_project_name
- value: "{{ heketi_namespace }}"
- - key: openshift.dynamic_provisioning.storage_classes.block_storage_class
- value:
- chapauthenabled: "true"
- hacount: "3"
- provisioner: "gluster.org/glusterblock"
- restsecretnamespace: "{{ heketi_namespace }}"
- resturl: "http://{{ master_ip }}:8080"
- restuser: "admin"
- volumenameprefix: "autotests-block"
- - key: openshift.dynamic_provisioning.storage_classes.file_storage_class
- value:
- provisioner: "kubernetes.io/glusterfs"
- resturl: "http://{{ master_ip }}:8080"
- restuser: "admin"
- secretnamespace: "{{ heketi_namespace }}"
- volumenameprefix: "autotests-file"
- - key: openshift.heketi_config
- value:
- heketi_cli_key: "{{ heketi_admin_secret }}"
- heketi_cli_user: "admin"
- heketi_client_node: "{{ master_ip }}"
- heketi_dc_name: "{{ heketi_dc_name }}"
- heketi_server_url: "http://{{ master_ip }}:8080"
- heketi_service_name: "{{ heketi_svc_name }}"
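Taken together, a run of this playbook emits a tests config shaped roughly like the sketch below; the master IP, hostnames, secret and Heketi DC/SVC names are hypothetical, gluster_servers is filled from the Heketi topology, and the openshift.dynamic_provisioning.storage_classes section is omitted for brevity:

  common:
    stop_on_first_failure: false
  gluster_servers:
    192.0.2.3:
      manage: gluster-node-1.example.com
      storage: 192.0.2.3
      additional_devices: ["/dev/sdf"]
  ocp_servers:
    client:
      192.0.2.1: {hostname: master.example.com}
    master:
      192.0.2.1: {hostname: master.example.com}
    nodes: {}
  openshift:
    storage_project_name: storage
    heketi_config:
      heketi_cli_key: secret
      heketi_cli_user: admin
      heketi_client_node: 192.0.2.1
      heketi_dc_name: heketi-storage
      heketi_server_url: "http://192.0.2.1:8080"
      heketi_service_name: heketi-storage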
diff --git a/deployment/playbooks/get_ocp_info.yaml b/deployment/playbooks/get_ocp_info.yaml
deleted file mode 100644
index 7046ccc6..00000000
--- a/deployment/playbooks/get_ocp_info.yaml
+++ /dev/null
@@ -1,233 +0,0 @@
----
-# Run this playbook this way:
-# $ ansible-playbook -i ocp-master-node-hostname-or-ip, get_ocp_info.yaml
-
-# Ansible runner machine info
-- hosts: localhost
- gather_facts: no
- tasks:
- - name: Generate name for data file
- set_fact:
- data_file_path: "{{ lookup('env', 'VIRTUAL_ENV') }}/../../ocp_{{
- (groups['all'][0]).replace('.', '_')
- }}_info.yaml"
- - name: Print data file name
- debug:
- msg: "Data file path is '{{ data_file_path }}'"
- - name: "[Re]Create file where we are going to store gathered data"
- copy:
- content: ""
- dest: "{{ data_file_path }}"
- mode: 0644
- force: yes
-
- - name: Get Linux kernel version of ansible runner
- shell: "uname -a"
- register: ansible_runner_linux_kernel_version
- - name: Get Red Hat release info for ansible runner
- shell: "cat /etc/redhat-release"
- register: ansible_runner_rh_release
- ignore_errors: yes
- - name: Get ansible-playbook version from ansible runner
- shell: "{{ lookup('env', 'VIRTUAL_ENV') }}/bin/ansible-playbook --version |
- grep '^ansible-playbook' | awk '{print $2}'"
- register: ansible_runner_ansible_playbook_version
- - name: Get 'openshift-ansible' lib version used by ansible runner
- shell: "echo \"openshift-ansible-$(cat {{
- lookup('env', 'VIRTUAL_ENV')
- }}/usr/share/ansible/openshift-ansible/.tito/packages/openshift-ansible | awk '{print $1}')\""
- register: ansible_runner_oc_lib_version
- - name: Write ansible runner data to the data file
- yedit:
- src: "{{ data_file_path }}"
- state: present
- backup: false
- edits:
- - key: 01_ansible_runner
- value:
- Linux kernel version: "{{ ansible_runner_linux_kernel_version.stdout_lines }}"
- Red Hat release info: "{{
- ansible_runner_rh_release.stdout_lines or
- 'File /etc/redhat-release was not found. Not RHEL machine?' }}"
- ansible-playbook version: "{{ ansible_runner_ansible_playbook_version.stdout_lines }}"
- openshift-ansible lib version: "{{ ansible_runner_oc_lib_version.stdout_lines }}"
-
-# === Master node info ===
-- hosts: all[0]
- gather_facts: no
- vars:
- master_package_list:
- - docker
- - heketi
- master_service_list:
- - docker
- - multipathd
- gluster_pod_package_list:
- - gluster
- - heketi
- - targetcli
- - gluster-block
- - tcmu-runner
- - python-configshell
- - python-rtslib
- gluster_pod_service_list:
- - glusterd
- - heketi
- - gluster-blockd
- - gluster-block-target
- - tcmu-runner
- - rpcbind
- heketi_pod_package_list:
- - gluster
- - heketi
- # NOTE(vponomar): we do not process the list of Heketi POD services for 2 reasons:
- # 1) There is no requirement to get the status of any services on the Heketi POD.
- # 2) 'systemctl' does not work on it.
- tasks:
- - name: Get Linux kernel version of the master node
- shell: "uname -a"
- register: master_linux_kernel_version
- - name: Get Red Hat release info for the master node
- shell: "cat /etc/redhat-release"
- register: master_rh_release
- - name: Create grep filter with all the packages we are interested in
- set_fact:
- package_filter: "{{ package_filter | default('grep') + ' -e ' + item }}"
- with_items: "{{ master_package_list }}"
- - name: Get list of installed packages we are interested in
- shell: "rpm -qa | {{ package_filter }}"
- register: master_packages
- - name: Get status of services on OCP Master node
- shell: "systemctl list-units {{ master_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend"
- register: master_services
- - name: Get OpenShift client version
- shell: "oc version | grep -e 'oc ' -e 'openshift' -e 'kube'"
- register: master_oc_version
- - name: Get list of OCP nodes
- shell: "oc get nodes"
- register: master_ocp_nodes
- - name: Get info about all the docker images used in OCP cluster
- shell: "oc get pods --all-namespaces
- -o=custom-columns=:.status.containerStatuses[*].image | grep -v -e '^$' | uniq"
- register: master_image_info
- - name: Write master data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ hostvars['localhost']['data_file_path'] }}"
- state: present
- edits:
- - key: 02_master
- value:
- Linux kernel version: "{{ master_linux_kernel_version.stdout_lines }}"
- Red Hat release info: "{{ master_rh_release.stdout_lines }}"
- List of Packages: "{{ master_packages.stdout_lines }}"
- List of services: "{{ master_services.stdout_lines }}"
- OC Version: "{{ master_oc_version.stdout_lines }}"
- OCP nodes: "{{ master_ocp_nodes.stdout_lines }}"
- Images info: "{{ master_image_info.stdout_lines }}"
-
- # Heketi POD
- - name: Get heketi POD
- shell: "oc get pods --all-namespaces -l heketi
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: heketi_pods
- - name: DEBUG HEKETI
- debug:
- msg: "{{ heketi_pods }}"
- - block:
- - name: Get storage release version from Heketi POD
- shell: "oc exec {{ (heketi_pods.stdout_lines[1].split(' ') | list)[0] }}
- --namespace {{ (heketi_pods.stdout_lines[1].split(' ') | list)[-1] }} --
- cat /etc/redhat-storage-release"
- register: heketi_pod_storage_release_version
- - name: Get info about packages on Heketi POD
- shell: "oc exec {{ (heketi_pods.stdout_lines[1].split(' ') | list)[0] }}
- --namespace {{ (heketi_pods.stdout_lines[1].split(' ') | list)[-1] }} --
- rpm -qa | grep -e {{ heketi_pod_package_list | join(' -e ') }}"
- register: heketi_pod_packages
- - name: Write Heketi data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ hostvars['localhost']['data_file_path'] }}"
- state: present
- edits:
- - key: 03_heketi_pod
- value:
- Storage release version: "{{ heketi_pod_storage_release_version.stdout_lines }}"
- List of Packages: "{{ heketi_pod_packages.stdout_lines }}"
- when: "((heketi_pods.stdout_lines | join('')).strip() | length) > 0"
-
- # Gluster PODs
- - name: Get list of Gluster PODs
- shell: "oc get pods --all-namespaces -l glusterfs-node
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: gluster_pods
- - name: DEBUG GLUSTER
- debug:
- msg: "{{ gluster_pods }}"
- - block:
- - name: Get storage release version from Gluster PODs
- shell: "oc exec {{ (item.split(' ') | list)[0] }}
- --namespace {{ (item.split(' ') | list)[-1] }} --
- cat /etc/redhat-storage-release"
- with_items: "{{ gluster_pods.stdout_lines[1:] }}"
- register: gluster_pod_storage_release_version_results
- - name: Process gluster PODs storage release versions results
- set_fact:
- gluster_pod_storage_release_version_processed: "{{
- gluster_pod_storage_release_version_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_storage_release_version_results.results }}"
- - name: Get info about packages on Gluster PODs
- shell: "oc exec {{ (item.split(' ') | list)[0] }}
- --namespace {{ (item.split(' ') | list)[-1] }} --
- rpm -qa | grep -e {{ gluster_pod_package_list | join(' -e ') }}"
- with_items: "{{ gluster_pods.stdout_lines[1:] }}"
- register: gluster_pod_package_list_results
- - name: Process gluster PODs package lists results
- set_fact:
- gluster_pod_package_list_processed: "{{
- gluster_pod_package_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_package_list_results.results }}"
- - name: Get info about services on Gluster PODs
- shell: "oc exec {{ (item.split(' ') | list)[0] }}
- --namespace {{ (item.split(' ') | list)[-1] }} --
- systemctl list-units {{ gluster_pod_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend"
- with_items: "{{ gluster_pods.stdout_lines[1:] }}"
- register: gluster_pod_service_list_results
- - name: Process gluster PODs service lists results
- set_fact:
- gluster_pod_service_list_processed: "{{
- gluster_pod_service_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_service_list_results.results }}"
- - name: Write Gluster PODs data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ hostvars['localhost']['data_file_path'] }}"
- state: present
- edits:
- - key: 04_gluster_pods
- value:
- Storage release version: "{{ gluster_pod_storage_release_version_processed }}"
- List of Packages: "{{ gluster_pod_package_list_processed }}"
- List of Services: "{{ gluster_pod_service_list_processed }}"
- when: "((gluster_pods.stdout_lines | join('')).strip() | length) > 0"
-
-- hosts: localhost
- gather_facts: no
- tasks:
- - shell: "cat {{ data_file_path }}"
- register: data_file_content
- - name: Print gathered data
- debug:
- msg: "{{ data_file_content.stdout_lines }}"
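The resulting ocp_<ip>_info.yaml carries four top-level keys; a trimmed, hypothetical sketch of its shape:

  01_ansible_runner:
    Linux kernel version: ["Linux runner 3.10.0 ..."]
    ansible-playbook version: ["2.6.x"]
  02_master:
    OC Version: ["oc v3.11.x", "openshift v3.11.x"]
    OCP nodes: ["NAME  STATUS  ROLES ..."]
  03_heketi_pod:
    Storage release version: ["Red Hat Gluster Storage Server x.y"]
  04_gluster_pods:
    List of Services: {"glusterfs-storage-abc12": ["glusterd.service loaded active ..."]}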
diff --git a/deployment/playbooks/library/rpm_q.py b/deployment/playbooks/library/rpm_q.py
deleted file mode 120000
index 43f43786..00000000
--- a/deployment/playbooks/library/rpm_q.py
+++ /dev/null
@@ -1 +0,0 @@
-/usr/share/ansible/openshift-ansible/library/rpm_q.py \ No newline at end of file
diff --git a/deployment/playbooks/library/vmware_folder.py b/deployment/playbooks/library/vmware_folder.py
deleted file mode 100644
index 8e1d9665..00000000
--- a/deployment/playbooks/library/vmware_folder.py
+++ /dev/null
@@ -1,268 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2017, Davis Phillips davis.phillips@gmail.com
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-ANSIBLE_METADATA = {
- 'status': ['preview'],
- 'supported_by': 'community',
- 'version': '1.0',
-}
-
-DOCUMENTATION = """
----
-module: vmware_folder
-short_description: Add/remove folders to/from vCenter
-description:
- - This module can be used to add/remove a folder to/from vCenter
-version_added: 2.3
-author: "Davis Phillips (@dav1x)"
-notes:
- - Tested on vSphere 6.5
-requirements:
- - "python >= 2.6"
- - PyVmomi
-options:
- datacenter:
- description:
- - Name of the datacenter in which to manage the folder
- required: True
- cluster:
- description:
- - Name of the cluster in which to manage the folder
- required: True
- folder:
- description:
- - Folder name to manage
- required: True
- hostname:
- description:
- - ESXi hostname to manage
- required: True
- username:
- description:
- - ESXi username
- required: True
- password:
- description:
- - ESXi password
- required: True
- state:
- description:
- - Add or remove the folder
- default: 'present'
- choices:
- - 'present'
- - 'absent'
-extends_documentation_fragment: vmware.documentation
-"""
-
-EXAMPLES = """
-# Create a folder
-  - name: Add a folder to vCenter
-    vmware_folder:
-      hostname: vcsa_host
-      username: vcsa_user
-      password: vcsa_pass
-      datacenter: datacenter
-      cluster: cluster
-      folder: folder
-      state: present
-"""
-
-RETURN = """
-instance:
-    description: metadata about the new folder
-    returned: always
-    type: dict
-    sample: None
-"""
-
-try:
- from pyVmomi import vim, vmodl
- HAS_PYVMOMI = True
-except ImportError:
- HAS_PYVMOMI = False
-
-from ansible.module_utils import basic # noqa
-from ansible.module_utils.vmware import ( # noqa
- connect_to_api,
- vmware_argument_spec,
- find_datacenter_by_name,
- find_cluster_by_name_datacenter,
- wait_for_task,
-)
-
-
-class VMwareFolder(object):
- def __init__(self, module):
- self.module = module
- self.datacenter = module.params['datacenter']
- self.cluster = module.params['cluster']
- self.folder = module.params['folder']
- self.hostname = module.params['hostname']
- self.username = module.params['username']
- self.password = module.params['password']
- self.state = module.params['state']
- self.dc_obj = None
- self.cluster_obj = None
- self.host_obj = None
- self.folder_obj = None
- self.folder_name = None
- self.folder_expanded = None
- self.folder_full_path = []
- self.content = connect_to_api(module)
-
- def find_host_by_cluster_datacenter(self):
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
-
- for host in self.cluster_obj.host:
- if host.name == self.hostname:
- return host, self.cluster
-
- return None, self.cluster
-
- def select_folder(self, host):
- fold_obj = None
- self.folder_expanded = self.folder.split("/")
- last_e = self.folder_expanded.pop()
- fold_obj = self.get_obj([vim.Folder], last_e)
- return fold_obj
-
- def get_obj(self, vimtype, name, return_all=False):
- obj = list()
- container = self.content.viewManager.CreateContainerView(
- self.content.rootFolder, vimtype, True)
-
- for c in container.view:
- if name in [c.name, c._GetMoId()]:
- if return_all is False:
- return c
- else:
- obj.append(c)
-
- if len(obj) > 0:
- return obj
- else:
- # for backwards-compat
- return None
-
- def process_state(self):
- try:
- folder_states = {
- 'absent': {
- 'present': self.state_remove_folder,
- 'absent': self.state_exit_unchanged,
- },
- 'present': {
- 'present': self.state_exit_unchanged,
- 'absent': self.state_add_folder,
- }
- }
- folder_states[self.state][self.check_folder_state()]()
- except vmodl.RuntimeFault as runtime_fault:
- self.module.fail_json(msg=runtime_fault.msg)
- except vmodl.MethodFault as method_fault:
- self.module.fail_json(msg=method_fault.msg)
- except Exception as e:
- self.module.fail_json(msg=str(e))
-
- def state_exit_unchanged(self):
- self.module.exit_json(changed=False)
-
- def state_remove_folder(self):
- changed = True
- result = None
- self.folder_expanded = self.folder.split("/")
- f = self.folder_expanded.pop()
- task = self.get_obj([vim.Folder], f).Destroy()
-
- try:
- success, result = wait_for_task(task)
- except Exception:
- self.module.fail_json(
- msg="Failed to remove folder '%s'" % self.folder)
-
- self.module.exit_json(changed=changed, result=str(result))
-
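- # Create the folder path component by component: the first piece lands
- # under the datacenter's vmFolder, each later piece under its parent.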
- def state_add_folder(self):
- changed = True
-
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
- self.folder_expanded = self.folder.split("/")
- index = 0
- for f in self.folder_expanded:
- if not self.get_obj([vim.Folder], f):
- if index == 0:
- # First object gets created on the datacenter
- self.dc_obj.vmFolder.CreateFolder(name=f)
- else:
- parent_f = self.get_obj(
- [vim.Folder], self.folder_expanded[index - 1])
- parent_f.CreateFolder(name=f)
- index = index + 1
-
- self.module.exit_json(changed=changed)
-
- def check_folder_state(self):
-
- self.host_obj, self.cluster_obj = (
- self.find_host_by_cluster_datacenter())
- self.folder_obj = self.select_folder(self.host_obj)
-
- if self.folder_obj is None:
- return 'absent'
- else:
- return 'present'
-
-
-def main():
- argument_spec = vmware_argument_spec()
- argument_spec.update(dict(datacenter=dict(required=True, type='str'),
- cluster=dict(required=True, type='str'),
- folder=dict(required=True, type='str'),
- hostname=dict(required=True, type='str'),
- username=dict(required=True, type='str'),
- password=dict(
- required=True, type='str', no_log=True),
- state=dict(
- default='present',
- choices=['present', 'absent'], type='str')))
-
- module = basic.AnsibleModule(
- argument_spec=argument_spec, supports_check_mode=True)
-
- if not HAS_PYVMOMI:
- module.fail_json(msg='pyvmomi is required for this module')
-
- vmware_folder = VMwareFolder(module)
- vmware_folder.process_state()
-
-
-if __name__ == '__main__':
- main()
diff --git a/deployment/playbooks/library/vmware_resource_pool.py b/deployment/playbooks/library/vmware_resource_pool.py
deleted file mode 100644
index 0c9ebbd2..00000000
--- a/deployment/playbooks/library/vmware_resource_pool.py
+++ /dev/null
@@ -1,361 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2017, Davis Phillips davis.phillips@gmail.com
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-ANSIBLE_METADATA = {
- 'status': ['preview'],
- 'supported_by': 'community',
- 'version': '1.0',
-}
-
-DOCUMENTATION = """
----
-module: vmware_resource_pool
-short_description: Add/remove resource pools to/from vCenter
-description:
- - This module can be used to add/remove a resource pool to/from vCenter
-version_added: 2.3
-author: "Davis Phillips (@dav1x)"
-notes:
- - Tested on vSphere 6.5
-requirements:
- - "python >= 2.6"
- - PyVmomi
-options:
- datacenter:
- description:
- - Name of the datacenter in which to manage the resource pool
- required: True
- cluster:
- description:
- - Name of the cluster in which to manage the resource pool
- required: True
- resource_pool:
- description:
- - Resource pool name to manage
- required: True
- hostname:
- description:
- - ESXi hostname to manage
- required: True
- username:
- description:
- - ESXi username
- required: True
- password:
- description:
- - ESXi password
- required: True
- cpu_expandable_reservations:
- description:
- - In a resource pool with an expandable reservation,
- the reservation on a resource pool can grow beyond
- the specified value.
- default: True
- cpu_reservation:
- description:
- - Amount of resource that is guaranteed available to
- the virtual machine or resource pool.
- default: 0
- cpu_limit:
- description:
- - The utilization of a virtual machine/resource pool will not
- exceed this limit, even if there are available resources.
- default: -1 (No limit)
- cpu_shares:
- description:
- - CPU shares are used in case of resource contention.
- choices:
- - high
- - custom
- - low
- - normal
- default: normal
- mem_expandable_reservations:
- description:
- - In a resource pool with an expandable reservation,
- the reservation on a resource pool can grow beyond
- the specified value.
- default: True
- mem_reservation:
- description:
- - Amount of resource that is guaranteed available to
- the virtual machine or resource pool.
- default: 0
- mem_limit:
- description:
- - The utilization of a virtual machine/resource pool will not
- exceed this limit, even if there are available resources.
- default: -1 (No limit)
- mem_shares:
- description:
- - Memory shares are used in case of resource contention.
- choices:
- - high
- - custom
- - low
- - normal
- default: normal
- state:
- description:
- - Add or remove the resource pool
- default: 'present'
- choices:
- - 'present'
- - 'absent'
-extends_documentation_fragment: vmware.documentation
-"""
-
-EXAMPLES = """
-# Create a resource pool
- - name: Add resource pool to vCenter
- vmware_resource_pool:
- hostname: vcsa_host
- username: vcsa_user
- password: vcsa_pass
- datacenter: datacenter
- cluster: cluster
- resource_pool: resource_pool
- mem_shares: normal
- mem_limit: -1
- mem_reservation: 0
- mem_expandable_reservations: True
- cpu_shares: normal
- cpu_limit: -1
- cpu_reservation: 0
- cpu_expandable_reservations: True
- state: present
-"""
-
-RETURN = """
-instance:
- description: metadata about the new resource pool
- returned: always
- type: dict
- sample: None
-"""
-
-try:
- from pyVmomi import vim, vmodl
- HAS_PYVMOMI = True
-except ImportError:
- HAS_PYVMOMI = False
-
-from ansible.module_utils import basic # noqa
-from ansible.module_utils.vmware import ( # noqa
- get_all_objs,
- connect_to_api,
- vmware_argument_spec,
- find_datacenter_by_name,
- find_cluster_by_name_datacenter,
- wait_for_task,
-)
-
-
-class VMwareResourcePool(object):
- def __init__(self, module):
- self.module = module
- self.datacenter = module.params['datacenter']
- self.cluster = module.params['cluster']
- self.resource_pool = module.params['resource_pool']
- self.hostname = module.params['hostname']
- self.username = module.params['username']
- self.password = module.params['password']
- self.state = module.params['state']
- self.mem_shares = module.params['mem_shares']
- self.mem_limit = module.params['mem_limit']
- self.mem_reservation = module.params['mem_reservation']
- self.mem_expandable_reservations = (
- module.params['mem_expandable_reservations'])
- self.cpu_shares = module.params['cpu_shares']
- self.cpu_limit = module.params['cpu_limit']
- self.cpu_reservation = module.params['cpu_reservation']
- self.cpu_expandable_reservations = (
- module.params['cpu_expandable_reservations'])
- self.dc_obj = None
- self.cluster_obj = None
- self.host_obj = None
- self.resource_pool_obj = None
- self.content = connect_to_api(module)
-
- def find_host_by_cluster_datacenter(self):
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
-
- for host in self.cluster_obj.host:
- if host.name == self.hostname:
- return host, self.cluster
-
- return None, self.cluster
-
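- # Return the first pool whose name matches the requested one, cross-
- # checked against the full ResourcePool inventory view.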
- def select_resource_pool(self, host):
- pool_obj = None
-
- resource_pools = get_all_objs(self.content, [vim.ResourcePool])
-
- pool_selections = self.get_obj(
- [vim.ResourcePool], self.resource_pool, return_all=True)
- if pool_selections:
- for p in pool_selections:
- if p in resource_pools:
- pool_obj = p
- break
- return pool_obj
-
- def get_obj(self, vimtype, name, return_all=False):
- obj = list()
- container = self.content.viewManager.CreateContainerView(
- self.content.rootFolder, vimtype, True)
-
- for c in container.view:
- if name in [c.name, c._GetMoId()]:
- if return_all is False:
- return c
- else:
- obj.append(c)
-
- if len(obj) > 0:
- return obj
- else:
- # for backwards-compat
- return None
-
- def process_state(self):
- try:
- rp_states = {
- 'absent': {
- 'present': self.state_remove_rp,
- 'absent': self.state_exit_unchanged,
- },
- 'present': {
- 'present': self.state_exit_unchanged,
- 'absent': self.state_add_rp,
- }
- }
-
- rp_states[self.state][self.check_rp_state()]()
-
- except vmodl.RuntimeFault as runtime_fault:
- self.module.fail_json(msg=runtime_fault.msg)
- except vmodl.MethodFault as method_fault:
- self.module.fail_json(msg=method_fault.msg)
- except Exception as e:
- self.module.fail_json(msg=str(e))
-
- def state_exit_unchanged(self):
- self.module.exit_json(changed=False)
-
- def state_remove_rp(self):
- changed = True
- result = None
- resource_pool = self.select_resource_pool(self.host_obj)
- try:
- task = self.resource_pool_obj.Destroy()
- success, result = wait_for_task(task)
-
- except Exception:
- self.module.fail_json(
- msg="Failed to remove resource pool '%s' '%s'" % (
- self.resource_pool, resource_pool))
- self.module.exit_json(changed=changed, result=str(result))
-
- def state_add_rp(self):
- changed = True
-
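- # Assemble the ResourceConfigSpec: one ResourceAllocationInfo each for
- # CPU and memory carrying limit/reservation/expandable flags, plus a
- # SharesInfo level ('low', 'normal', 'high' or 'custom').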
- rp_spec = vim.ResourceConfigSpec()
- cpu_alloc = vim.ResourceAllocationInfo()
- cpu_alloc.expandableReservation = self.cpu_expandable_reservations
- cpu_alloc.limit = int(self.cpu_limit)
- cpu_alloc.reservation = int(self.cpu_reservation)
- cpu_alloc_shares = vim.SharesInfo()
- cpu_alloc_shares.level = self.cpu_shares
- cpu_alloc.shares = cpu_alloc_shares
- rp_spec.cpuAllocation = cpu_alloc
- mem_alloc = vim.ResourceAllocationInfo()
- mem_alloc.limit = int(self.mem_limit)
- mem_alloc.expandableReservation = self.mem_expandable_reservations
- mem_alloc.reservation = int(self.mem_reservation)
- mem_alloc_shares = vim.SharesInfo()
- mem_alloc_shares.level = self.mem_shares
- mem_alloc.shares = mem_alloc_shares
- rp_spec.memoryAllocation = mem_alloc
-
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
- rootResourcePool = self.cluster_obj.resourcePool
- rootResourcePool.CreateResourcePool(self.resource_pool, rp_spec)
-
- self.module.exit_json(changed=changed)
-
- def check_rp_state(self):
-
- self.host_obj, self.cluster_obj = (
- self.find_host_by_cluster_datacenter())
- self.resource_pool_obj = self.select_resource_pool(self.host_obj)
-
- if self.resource_pool_obj is None:
- return 'absent'
- else:
- return 'present'
-
-
-def main():
- argument_spec = vmware_argument_spec()
- argument_spec.update(dict(datacenter=dict(required=True, type='str'),
- cluster=dict(required=True, type='str'),
- resource_pool=dict(required=True, type='str'),
- hostname=dict(required=True, type='str'),
- username=dict(required=True, type='str'),
- password=dict(
- required=True, type='str', no_log=True),
- mem_shares=dict(
- type='str', default="normal",
- choices=['high', 'custom', 'normal', 'low']),
- mem_limit=dict(type='int', default="-1"),
- mem_reservation=dict(type='int', default="0"),
- mem_expandable_reservations=dict(
- type='bool', default="True"),
- cpu_shares=dict(
- type='str', default="normal",
- choices=['high', 'custom', 'normal', 'low']),
- cpu_limit=dict(type='int', default="-1"),
- cpu_reservation=dict(type='int', default="0"),
- cpu_expandable_reservations=dict(
- type='bool', default="True"),
- state=dict(
- default='present',
- choices=['present', 'absent'], type='str')))
-
- module = basic.AnsibleModule(
- argument_spec=argument_spec, supports_check_mode=True)
-
- if not HAS_PYVMOMI:
- module.fail_json(msg='pyvmomi is required for this module')
-
- vmware_rp = VMwareResourcePool(module)
- vmware_rp.process_state()
-
-
-if __name__ == '__main__':
- main()
diff --git a/deployment/playbooks/node-setup.yaml b/deployment/playbooks/node-setup.yaml
deleted file mode 100644
index c8e5916e..00000000
--- a/deployment/playbooks/node-setup.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
----
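-# Wraps 'scaleup.yaml' with the vSphere cloud provider settings and the
-# node group definitions (the latter required for OCP 3.10).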
-- include: "scaleup.yaml"
- vars:
- debug_level: 2
- openshift_debug_level: "{{ debug_level }}"
- openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
- osm_controller_args:
- osm_api_server_args:
- openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
- openshift_master_access_token_max_seconds: 2419200
- openshift_master_api_port: "{{ console_port }}"
- openshift_master_console_port: "{{ console_port }}"
- osm_cluster_network_cidr: 172.16.0.0/16
- openshift_registry_selector: "role=compute"
- openshift_router_selector: "role=compute"
- openshift_node_local_quota_per_fsgroup: 512Mi
- openshift_master_cluster_method: native
- openshift_cloudprovider_kind: vsphere
- openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
- openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
- openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
- openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
- openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
- openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
- os_sdn_network_plugin_name: "{{ openshift_sdn }}"
- deployment_type: "{{ deployment_type }}"
- load_balancer_hostname: "{{ lb_host }}"
- openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
- openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
diff --git a/deployment/playbooks/noop.yaml b/deployment/playbooks/noop.yaml
deleted file mode 100644
index 94173aed..00000000
--- a/deployment/playbooks/noop.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- ignore_errors: no
- tasks:
- - debug:
- msg: "No operation TASK for placeholder playbook."
diff --git a/deployment/playbooks/ocp-configure.yaml b/deployment/playbooks/ocp-configure.yaml
deleted file mode 100644
index c5123e6a..00000000
--- a/deployment/playbooks/ocp-configure.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: localhost
- gather_facts: yes
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - instance-groups
-
-- hosts: single_master
- gather_facts: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
- - storage-class-configure
diff --git a/deployment/playbooks/ocp-end-to-end.yaml b/deployment/playbooks/ocp-end-to-end.yaml
deleted file mode 100644
index 58f0ca01..00000000
--- a/deployment/playbooks/ocp-end-to-end.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- include: setup.yaml
- tags: ['setup']
-
-- include: prod.yaml
- tags: ['prod']
-
-- include: ocp-install.yaml
- tags: ['ocp-install']
-
-- include: ocp-configure.yaml
- tags: ['ocp-configure']
-
-- include: clean.yaml
- tags: ['clean']
diff --git a/deployment/playbooks/ocp-install.yaml b/deployment/playbooks/ocp-install.yaml
deleted file mode 100644
index 43b92c2f..00000000
--- a/deployment/playbooks/ocp-install.yaml
+++ /dev/null
@@ -1,365 +0,0 @@
----
-- hosts: localhost
- gather_facts: yes
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - instance-groups
-
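-# Pick the prerequisites playbook by version: the local one for OCP 3.6/3.7,
-# otherwise the one shipped with openshift-ansible inside the virtualenv.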
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'prerequisite.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
- ) }}"
- vars:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- name: Call openshift includes for OCP3.6 and OCP3.7 installer
- include: "{{
- lookup('env', 'VIRTUAL_ENV')
- }}/usr/share/ansible/openshift-ansible/playbooks/{{
- (openshift_vers in ['v3_6', 'v3_7']) |
- ternary('byo/config.yml', 'deploy_cluster.yml')
- }}"
- vars:
- openshift_release: "v3.{{ openshift_vers.split('_')[-1] }}"
- debug_level: 2
- console_port: 8443
- openshift_debug_level: "{{ debug_level }}"
- openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
- # NOTE(vponomar): following two can be changed to "true" when
- # https://github.com/openshift/openshift-ansible/issues/6086 is fixed
- openshift_enable_service_catalog: false
- template_service_broker_install: false
- osm_controller_args:
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- osm_api_server_args:
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
- openshift_master_access_token_max_seconds: 2419200
- openshift_hosted_router_replicas: 1
- openshift_hosted_registry_replicas: 1
- openshift_master_api_port: "{{ console_port }}"
- openshift_master_console_port: "{{ console_port }}"
- openshift_node_local_quota_per_fsgroup: 512Mi
- osm_cluster_network_cidr: 172.16.0.0/16
- osm_use_cockpit: false
- osm_default_node_selector: "role=compute"
- openshift_registry_selector: "role=compute"
- openshift_override_hostname_check: true
- openshift_router_selector: "role=compute"
- openshift_master_cluster_method: native
- openshift_cloudprovider_kind: vsphere
- openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
- openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
- openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
- openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
- openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
- openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
- wildcard_zone: "{{ app_dns_prefix }}.{{ dns_zone }}"
- osm_default_subdomain: "{{ wildcard_zone }}"
- openshift_master_default_subdomain: "{{osm_default_subdomain}}"
- deployment_type: "{{ deployment_type }}"
- load_balancer_hostname: "{{ lb_host }}"
- openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
- openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
- os_sdn_network_plugin_name: "{{ openshift_sdn }}"
- openshift_master_identity_providers:
- - name: 'allow_all'
- kind: 'AllowAllPasswordIdentityProvider'
- login: True
- challenge: True
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- when: openshift_vers in ['v3_6', 'v3_7']
-
-- name: "Call openshift includes for OCP3.9+ installer"
- include: "{{
- lookup('env', 'VIRTUAL_ENV')
- }}/usr/share/ansible/openshift-ansible/playbooks/{{
- (openshift_vers in ['v3_6', 'v3_7']) |
- ternary('byo/config.yml', 'deploy_cluster.yml')
- }}"
- vars:
- openshift_release: "v3.{{ openshift_vers.split('_')[-1] }}"
- debug_level: 2
- console_port: 8443
- openshift_debug_level: "{{ debug_level }}"
- openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
- # NOTE(vponomar): following two can be changed to "true" when
- # https://github.com/openshift/openshift-ansible/issues/6086 is fixed
- openshift_enable_service_catalog: false
- template_service_broker_install: false
- osm_controller_args:
- feature-gates:
- - "ExpandPersistentVolumes=true"
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- osm_api_server_args:
- feature-gates:
- - "ExpandPersistentVolumes=true"
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- openshift_master_admission_plugin_config:
- PersistentVolumeClaimResize:
- configuration:
- apiVersion: v1
- disable: false
- kind: DefaultAdmissionConfig
- openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
- openshift_master_access_token_max_seconds: 2419200
- openshift_hosted_router_replicas: 1
- openshift_hosted_registry_replicas: 1
- openshift_master_api_port: "{{ console_port }}"
- openshift_master_console_port: "{{ console_port }}"
- openshift_node_local_quota_per_fsgroup: 512Mi
- osm_cluster_network_cidr: 172.16.0.0/16
- osm_use_cockpit: false
- osm_default_node_selector: "role=compute"
- openshift_registry_selector: "role=compute"
- openshift_override_hostname_check: true
- openshift_router_selector: "role=compute"
- openshift_master_cluster_method: native
- openshift_cloudprovider_kind: vsphere
- openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
- openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
- openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
- openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
- openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
- openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
- wildcard_zone: "{{ app_dns_prefix }}.{{ dns_zone }}"
- osm_default_subdomain: "{{ wildcard_zone }}"
- openshift_master_default_subdomain: "{{osm_default_subdomain}}"
- deployment_type: "{{ deployment_type }}"
- load_balancer_hostname: "{{ lb_host }}"
- openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
- openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
- os_sdn_network_plugin_name: "{{ openshift_sdn }}"
- openshift_master_identity_providers:
- - name: 'allow_all'
- kind: 'AllowAllPasswordIdentityProvider'
- login: True
- challenge: True
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- hosts: allnodes
- gather_facts: no
- ignore_errors: no
- tasks:
- - service:
- name: dnsmasq
- state: restarted
-
-- name: Run yum_update command on all the nodes and then reboot them
- hosts: localhost
- gather_facts: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
diff --git a/deployment/playbooks/prerequisite.yaml b/deployment/playbooks/prerequisite.yaml
deleted file mode 100644
index 5c7cc399..00000000
--- a/deployment/playbooks/prerequisite.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- hosts: cluster_hosts
- gather_facts: yes
- become: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
- - package-repos
-
-- hosts: cluster_hosts
- gather_facts: no
- vars_files:
- - vars/main.yaml
- become: yes
- roles:
- - prerequisites
-
-- hosts: master
- gather_facts: yes
- vars_files:
- - vars/main.yaml
- become: yes
- roles:
- - master-prerequisites
- - etcd-storage
diff --git a/deployment/playbooks/prod-ose-cns.yaml b/deployment/playbooks/prod-ose-cns.yaml
deleted file mode 100644
index 80a85f11..00000000
--- a/deployment/playbooks/prod-ose-cns.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: localhost
- connection: local
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - create-vm-cns-prod-ose
- - setup-custom-domain-names-for-ansible-runner
diff --git a/deployment/playbooks/prod-ose-crs.yaml b/deployment/playbooks/prod-ose-crs.yaml
deleted file mode 100644
index aa9537ab..00000000
--- a/deployment/playbooks/prod-ose-crs.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: localhost
- connection: local
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - create-vm-crs-prod-ose
- - setup-custom-domain-names-for-ansible-runner
diff --git a/deployment/playbooks/prod.yaml b/deployment/playbooks/prod.yaml
deleted file mode 100644
index 04be066b..00000000
--- a/deployment/playbooks/prod.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- hosts: localhost
- vars_files:
- - vars/main.yaml
- roles:
- - create-vm-prod-ose
- - setup-custom-domain-names-for-ansible-runner
-
-- name: fulfill OSE3 prerequisites on production hosts roles
- hosts: production_group
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - package-repos
- - vmware-guest-setup
- - cloud-provider-setup
- - docker-storage-setup
- - openshift-volume-quota
diff --git a/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
deleted file mode 100644
index 1b93ce22..00000000
--- a/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: create /etc/origin/cloudprovider
- file:
- state: directory
- path: "{{ vsphere_conf_dir }}"
-
-- name: create the vsphere.conf file
- template:
- src: "{{ role_path }}/templates/vsphere.conf.j2"
- dest: /etc/origin/cloudprovider/vsphere.conf
- owner: root
- group: root
- mode: 0644
diff --git a/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2 b/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
deleted file mode 100644
index 8abe6e8c..00000000
--- a/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[Global]
-user = "{{ vcenter_username }}"
-password = "{{ vcenter_password }}"
-server = "{{ vcenter_host }}"
-port = 443
-insecure-flag = 1
-datacenter = {{ vcenter_datacenter }}
-datastore = {{ vcenter_datastore }}
-working-dir = /{{ vcenter_datacenter }}/vm/{{ vcenter_folder }}/
-[Disk]
-scsicontrollertype = pvscsi
diff --git a/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
deleted file mode 100644
index 81511c01..00000000
--- a/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-vsphere_conf_dir: /etc/origin/cloudprovider
-vsphere_conf: "{{ vsphere_conf_dir }}/vsphere.conf"
diff --git a/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
deleted file mode 100644
index 392b5da1..00000000
--- a/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Add following nodes to the 'new_nodes' group
- set_fact:
- is_add_nodes: true
-
-- name: Import common node creation role
- import_role:
- name: create-vm-prod-ose
diff --git a/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
deleted file mode 100644
index e01f1dd0..00000000
--- a/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,142 +0,0 @@
----
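-# 'disks_info' seeds three thin disks (one 60G, two 40G; system and heketi)
-# and appends one disk per comma-separated size in 'container_storage_disks',
-# e.g. "100,200" adds a 100G and a 200G gluster disk.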
-- name: Define set of main disks (system and heketi)
- set_fact:
- disks_info: "{{ disks_info | default([
- {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
- }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ container_storage_disks.split(',') }}"
-
-- name: Define set of additional disks which will be just attached to nodes
- set_fact:
- additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
-
-- name: Create CNS production VMs on vCenter
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ item.value.guestname }}"
- cluster: "{{ vcenter_cluster}}"
- datacenter: "{{ vcenter_datacenter }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- template: "{{vcenter_template_name}}"
- state: poweredon
- wait_for_ip_address: true
- folder: "/{{ vcenter_folder }}"
- annotation: "{{ item.value.tag }}"
- disk: "{{ disks_info }} + {{ additional_disks_info }}"
- hardware:
- memory_mb: 32768
- networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
- customization:
- domain: "{{dns_zone}}"
- dns_suffix: "{{dns_zone}}"
- hostname: "{{ item.value.guestname}}"
- with_dict: "{{host_inventory}}"
- when: "item.value.guesttype in ['cns', ]"
- async: "{{ 6 * 600 }}"
- poll: 0
- register: async_vms_creation
-
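-# VMs are cloned asynchronously (poll: 0, up to 3600s each) so all clones
-# run in parallel; completion is awaited by the async_status task below.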
-- name: Check async status of VMs creation
- async_status:
- jid: "{{ async_result_item.ansible_job_id }}"
- with_items: "{{ async_vms_creation.results }}"
- loop_control:
- loop_var: "async_result_item"
- register: async_poll_results
- until: async_poll_results.finished
- retries: "{{ 6 * 100 }}"
-
-- name: Read info of newly created VMs
- vmware_guest_tools_wait:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- folder: "/{{ vcenter_folder }}"
- validate_certs: False
- uuid: "{{ item.instance.hw_product_uuid }}"
- with_items: "{{ async_poll_results.results }}"
- register: facts
-
-- name: Map node names and their IP addresses
- set_fact:
- ip4addrs: "{{ ip4addrs | default({}) | combine(
- {item.instance.hw_name: (
- item.instance.hw_eth0.ipaddresses | ipv4 | first)},
- recursive=True) }}"
- hostnames_for_reboot: "{{
- (hostnames_for_reboot | default([])) +
- [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
- with_items: "{{ facts.results }}"
-
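-# Gluster device names are derived from list position: entry 3 of
-# 'disks_info' maps to /dev/sdd, entry 4 to /dev/sde, and so on; additional
-# disks continue from the next free letter.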
-- name: Define glusterfs devices
- set_fact:
- glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
- {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
- with_indexed_items: "{{ disks_info[3::] }}"
-
-- name: Define glusterfs additional devices
- set_fact:
- glusterfs_additional_devices: "{{
- glusterfs_additional_devices | default([])
- }} + {{
- ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
- }}"
- with_indexed_items: "{{ additional_disks_info }}"
-
-- name: Add CNS production VMs to inventory
- add_host:
- hostname: "{{ item.value.guestname }}"
- ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
- ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
- groups: "{{ item.value.tag }}, new_nodes, storage, cns, glusterfs"
- openshift_node_group_name: "node-config-storage"
- # Following vars are for 'openshift_storage_glusterfs' role from
- # 'openshift/openshift-ansible' repo
- glusterfs_devices: "{{ glusterfs_devices }}"
- glusterfs_hostname: "{{ item.value.guestname }}"
- glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
- glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['cns', ]"
-
-# Following updates config file
-# which is required for automated tests from 'glusterfs-containers-tests' repo
-
-- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
- set_fact:
- gluster_servers: "{{
- gluster_servers | default({}) | combine({
- ip4addrs[item.value.guestname]: {
- 'manage': item.value.guestname,
- 'storage': ip4addrs[item.value.guestname],
- 'additional_devices': glusterfs_additional_devices,
- }
- })
- }}"
- with_dict: "{{ host_inventory }}"
- when:
- - item.value.guesttype in ['cns', ]
- - cns_automation_config_file_path | length > 0
-
-- name: Update 'glusterfs-containers-tests' config file
- yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: gluster_servers
- value: "{{ gluster_servers }}"
- when: gluster_servers is defined
diff --git a/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
deleted file mode 100644
index 05aa63bb..00000000
--- a/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,143 +0,0 @@
----
-- name: Define set of main disks (system and heketi)
- set_fact:
- disks_info: "{{ disks_info | default([
- {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
- }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ container_storage_disks.split(',') }}"
-
-- name: Define set of additional disks which will be just attached to nodes
- set_fact:
- additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
-
-- name: Create CRS production VMs on vCenter
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ item.value.guestname }}"
- cluster: "{{ vcenter_cluster}}"
- datacenter: "{{ vcenter_datacenter }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- template: "{{vcenter_template_name}}"
- state: poweredon
- wait_for_ip_address: true
- folder: "/{{ vcenter_folder }}"
- annotation: "{{ cluster_id }}-crs"
- disk: "{{ disks_info }} + {{ additional_disks_info }}"
- hardware:
- memory_mb: 32768
- networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
- customization:
- domain: "{{dns_zone}}"
- dns_suffix: "{{dns_zone}}"
- hostname: "{{ item.value.guestname}}"
- with_dict: "{{host_inventory}}"
- when: "item.value.guesttype in ['crs', ]"
- async: "{{ 6 * 600 }}"
- poll: 0
- register: async_vms_creation
-
-- name: Check async status of VMs creation
- async_status:
- jid: "{{ async_result_item.ansible_job_id }}"
- with_items: "{{ async_vms_creation.results }}"
- loop_control:
- loop_var: "async_result_item"
- register: async_poll_results
- until: async_poll_results.finished
- retries: "{{ 6 * 100 }}"
-
-- name: Read info of newly created VMs
- vmware_guest_tools_wait:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- folder: "/{{ vcenter_folder }}"
- validate_certs: False
- uuid: "{{ item.instance.hw_product_uuid }}"
- with_items: "{{ async_poll_results.results }}"
- register: facts
-
-- name: Map node names and their IP addresses
- set_fact:
- ip4addrs: "{{ ip4addrs | default({}) | combine(
- {item.instance.hw_name: (
- item.instance.hw_eth0.ipaddresses | ipv4 | first)},
- recursive=True) }}"
- hostnames_for_reboot: "{{
- (hostnames_for_reboot | default([])) +
- [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
- with_items: "{{ facts.results }}"
-
-- name: Define glusterfs devices
- set_fact:
- glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
- {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
- with_indexed_items: "{{ disks_info[3::] }}"
-
-- name: Define glusterfs additional devices
- set_fact:
- glusterfs_additional_devices: "{{
- glusterfs_additional_devices | default([])
- }} + {{
- ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
- }}"
- with_indexed_items: "{{ additional_disks_info }}"
-
-- name: Add CRS production VMs to inventory
- add_host:
- hostname: "{{ item.value.guestname }}"
- ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
- ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
- openshift_node_group_name: "node-config-storage"
- # old groups are: crs, production_group, {{cluster-id}}-crs
- groups: "{{ cluster_id }}-crs, crs, storage, glusterfs"
- # Following vars are for 'openshift_storage_glusterfs' role from
- # 'openshift/openshift-ansible' repo
- glusterfs_devices: "{{ glusterfs_devices }}"
- glusterfs_hostname: "{{ item.value.guestname }}"
- glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
- glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['crs', ]"
-
-# Following updates config file
-# which is required for automated tests from 'glusterfs-containers-tests' repo
-
-- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
- set_fact:
- gluster_servers: "{{
- gluster_servers | default({}) | combine({
- ip4addrs[item.value.guestname]: {
- 'manage': item.value.guestname,
- 'storage': ip4addrs[item.value.guestname],
- 'additional_devices': glusterfs_additional_devices,
- }
- })
- }}"
- with_dict: "{{ host_inventory }}"
- when:
- - item.value.guesttype in ['crs', ]
- - cns_automation_config_file_path | length > 0
-
-- name: Update 'glusterfs-containers-tests' config file
- yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: gluster_servers
- value: "{{ gluster_servers }}"
- when: gluster_servers is defined
diff --git a/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
deleted file mode 100644
index a0124348..00000000
--- a/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,157 +0,0 @@
----
-- name: Get to know whether we need to add following nodes to "new_nodes" group or not
- set_fact:
- is_add_nodes: "{{ is_add_nodes | default(false) }}"
-
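-# Masters get 16G RAM and an extra 40G disk; compute nodes get 32G when
-# CNS (containerized GlusterFS) runs on them and 8G otherwise.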
-- name: Define memory and disk parameters per node type
- set_fact:
- host_data:
- master:
- memory: 16384
- disk:
- - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- compute:
- memory: "{{ ('cns' in container_storage) | ternary(32768, 8192) }}"
- disk:
- - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
-
-- name: Create production VMs on vCenter
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ item.value.guestname }}"
- cluster: "{{ vcenter_cluster}}"
- datacenter: "{{ vcenter_datacenter }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- template: "{{vcenter_template_name}}"
- state: poweredon
- wait_for_ip_address: true
- folder: "/{{ vcenter_folder }}"
- annotation: "{{ item.value.tag }}"
- disk: "{{ host_data[item.value.guesttype].disk }}"
- hardware:
- memory_mb: "{{ host_data[item.value.guesttype].memory }}"
- networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
- customization:
- domain: "{{dns_zone}}"
- dns_suffix: "{{ dns_zone }}"
- hostname: "{{ item.value.guestname }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['compute', 'master']"
- async: "{{ 6 * 600 }}"
- poll: 0
- register: async_vms_creation
-
-- name: Check async status of VMs creation
- async_status:
- jid: "{{ async_result_item.ansible_job_id }}"
- with_items: "{{ async_vms_creation.results }}"
- loop_control:
- loop_var: "async_result_item"
- register: async_poll_results
- until: async_poll_results.finished
- retries: "{{ 6 * 100 }}"
-
-- name: Read info of newly created VMs
- vmware_guest_tools_wait:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- folder: "/{{ vcenter_folder }}"
- validate_certs: False
- uuid: "{{ item.instance.hw_product_uuid }}"
- with_items: "{{ async_poll_results.results }}"
- register: facts
-
-- name: Map node names and their IP addresses
- set_fact:
- ip4addrs: "{{ ip4addrs | default({}) | combine(
- {item.instance.hw_name: (
- item.instance.hw_eth0.ipaddresses | ipv4 | first)},
- recursive=True) }}"
- hostnames_for_reboot: "{{
- (hostnames_for_reboot | default([])) +
- [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
- with_items: "{{ facts.results }}"
-
-- name: Add production VMs to inventory
- add_host:
- hostname: "{{ item.value.guestname }}"
- ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
- ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
- groups: "{{ item.value.tag }}, production_group{{ is_add_nodes | ternary(', new_nodes', '')}}"
- openshift_node_group_name: "{{
- (item.value.guesttype == 'master') | ternary('node-config-master',
- 'node-config-compute') }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['compute', 'master']"
-
-# Following updates config file
-# which is required for automated tests from 'glusterfs-containers-tests' repo
-
-- name: Gather data about existing master nodes for tests config file
- set_fact:
- ocp_master_and_client_nodes: "{{
- ocp_master_and_client_nodes | default({}) | combine({
- (
- ((
- (hostvars[item].guest | default({'net': [{
- 'network': vm_network,
- 'ipaddress': [
- ip4addrs[hostvars[item].inventory_hostname_short]
- ]
- }]})).net | selectattr('network', 'equalto', vm_network)
- ) | list)[0].ipaddress | ipv4 | first
- ): {
- 'hostname': hostvars[item].inventory_hostname_short,
- }
- })
- }}"
- with_items: "{{ groups[cluster_id + '-master'] }}"
- when: cns_automation_config_file_path | length > 0
-
-- name: Gather data about existing compute nodes for tests config file
- set_fact:
- ocp_compute_nodes: "{{
- ocp_compute_nodes | default({}) | combine({
- (
- ((
- (hostvars[item].guest | default({'net': [{
- 'network': vm_network,
- 'ipaddress': [
- ip4addrs[hostvars[item].inventory_hostname_short]
- ]
- }]})).net | selectattr('network', 'equalto', vm_network)
- ) | list)[0].ipaddress | ipv4 | first
- ): {
- 'hostname': hostvars[item].inventory_hostname_short,
- }
- })
- }}"
- with_items: "{{ groups[cluster_id + '-compute'] | default([]) }} "
- when: cns_automation_config_file_path | length > 0
-
-- name: Update 'glusterfs-containers-tests' config file
- yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: ocp_servers
- value:
- master: "{{ ocp_master_and_client_nodes }}"
- client: "{{ ocp_master_and_client_nodes }}"
- nodes: "{{ ocp_compute_nodes }}"
- - key: openshift.heketi_config.heketi_client_node
- value: "{{ ocp_master_and_client_nodes.keys()[0] }}"
- - key: openshift.heketi_config.heketi_server_url
- value: "http://{{ ocp_master_and_client_nodes.keys()[0] }}:8080"
- when:
- - ocp_master_and_client_nodes is defined
- - ocp_compute_nodes is defined
diff --git a/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml b/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
deleted file mode 100644
index dfe5e649..00000000
--- a/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Clear yum cache
- command: "yum clean all"
- ignore_errors: true
-
-- name: Install required common rpms
- package:
- name: "{{ item }}"
- state: latest
- with_items:
- - 'iptables'
- - 'iptables-services'
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
-
-- name: Enable Gluster 3 repo
- import_role:
- name: enable-gluster-repo
-
-- name: Install required Gluster 3 rpms
- package:
- name: "{{ item }}"
- state: latest
- with_items:
- - 'redhat-storage-server'
- - 'heketi-client'
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
-
-- name: Install gluster-block package
- package:
- name: "{{ item }}"
- state: latest
- with_items:
- - 'gluster-block'
- retries: 5
- delay: 5
- ignore_errors: yes
-
-- name: Stop firewalld
- service:
- name: firewalld
- state: stopped
- enabled: no
-
-- name: Start Glusterd and iptables
- service:
- name: "{{ item }}"
- state: started
- enabled: true
- with_items:
- - iptables
- - glusterd
-
-- name: Start gluster-blockd service
- service:
- name: "{{ item }}"
- state: started
- enabled: true
- with_items:
- - gluster-blockd
- ignore_errors: yes
diff --git a/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml b/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
deleted file mode 100644
index 062f543a..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-docker_dev: "/dev/sdb"
-docker_vg: "docker-vol"
-docker_data_size: "95%VG"
-docker_dm_basesize: "3G"
-container_root_lv_name: "dockerlv"
-container_root_lv_mount_path: "/var/lib/docker"
diff --git a/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml b/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
deleted file mode 100644
index 70c04802..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
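-# Selects the docker storage backend: overlayfs template on RHEL >= 7.4,
-# devicemapper template on older releases; skipped entirely under CRI-O.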
-- name: remove any existing docker-storage config file
- file:
- path: /etc/sysconfig/docker-storage
- state: absent
- when: not (openshift_use_crio | default(false) | bool)
-
-- block:
- - name: create the docker-storage config file
- template:
- src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2"
- dest: /etc/sysconfig/docker-storage-setup
- owner: root
- group: root
- mode: 0644
- when:
- - ansible_distribution_version | version_compare('7.4', '>=')
- - ansible_distribution == "RedHat"
- - not (openshift_use_crio | default(false) | bool)
-
-- block:
- - name: create the docker-storage-setup config file
- template:
- src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
- dest: /etc/sysconfig/docker-storage-setup
- owner: root
- group: root
- mode: 0644
- when:
- - ansible_distribution_version | version_compare('7.4', '<')
- - ansible_distribution == "RedHat"
- - not (openshift_use_crio | default(false) | bool)
-
-- name: start docker
- service:
- name: docker
- state: started
- enabled: true
- when: not (openshift_use_crio | default(false) | bool)
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
deleted file mode 100644
index b5869fef..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-DEVS="{{ docker_dev }}"
-VG="{{ docker_vg }}"
-DATA_SIZE="{{ docker_data_size }}"
-EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
deleted file mode 100644
index 61ba30af..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-DEVS="{{ docker_dev }}"
-VG="{{ docker_vg }}"
-DATA_SIZE="{{ docker_data_size }}"
-STORAGE_DRIVER=overlay2
-CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
-CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
-CONTAINER_ROOT_LV_SIZE=100%FREE
\ No newline at end of file
diff --git a/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml b/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
deleted file mode 100644
index 7236d77d..00000000
--- a/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Enable main Gluster 3 repo with GA packages
- command: "subscription-manager repos --enable=rh-gluster-3-for-rhel-7-server-rpms"
-# when: gluster_puddle_repo == ''
-
-- name: Create additional repo with downstream packages for Gluster 3
- yum_repository:
- name: "downstream-rh-gluster-3-for-rhel-7-server-rpms"
- baseurl: "{{ gluster_puddle_repo }}"
- description: "Downstream repo with development versions of packages for Gluster 3"
- enabled: "yes"
- gpgcheck: "no"
- sslverify: "no"
- cost: 990
- when: gluster_puddle_repo != ''
diff --git a/deployment/playbooks/roles/etcd-storage/tasks/main.yaml b/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
deleted file mode 100644
index fe13dc17..00000000
--- a/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Create openshift volume group
- lvg: vg=etcd_vg pvs=/dev/sdd
-
-- name: Create lvm volumes
- lvol: vg=etcd_vg lv=etcd_lv size=95%FREE state=present shrink=no
-
-- name: Create local partition on lvm lv
- filesystem:
- fstype: xfs
- dev: /dev/etcd_vg/etcd_lv
-
-- name: Create the etcd mount point directory
- file: path=/var/lib/etcd state=directory mode=0755
-
-- name: Mount the partition
- mount:
- name: /var/lib/etcd
- src: /dev/etcd_vg/etcd_lv
- fstype: xfs
- state: present
-
-- name: Remount new partition
- command: "mount -a"
diff --git a/deployment/playbooks/roles/gluster-ports/defaults/main.yaml b/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
deleted file mode 100644
index fadcb096..00000000
--- a/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-gluster_ports: ['24007', '24008', '2222', '49152:49664', '24010', '3260', '111']
-crs_ports: ['8080']
diff --git a/deployment/playbooks/roles/gluster-ports/tasks/main.yaml b/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
deleted file mode 100644
index a3f0565b..00000000
--- a/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: open gluster ports
- iptables:
- chain: INPUT
- destination_port: "{{ item }}"
- jump: ACCEPT
- ctstate: NEW
- protocol: tcp
- action: insert
- match: tcp
- with_items: "{{ gluster_ports }}"
- when: groups['storage'] is defined and groups['storage'] != []
- register: rule
-
-- name: save iptables
- shell: iptables-save > /etc/sysconfig/iptables
- when: rule|changed
-
-- name: open crs (heketi) ports
- iptables:
- chain: INPUT
- destination_port: "{{ item }}"
- ctstate: NEW
- jump: ACCEPT
- protocol: tcp
- action: insert
- match: tcp
- with_items: "{{ crs_ports }}"
- when: groups['crs'] is defined and groups['crs'] != []
- register: heketi
-
-- name: save iptables
- shell: iptables-save > /etc/sysconfig/iptables
- when: heketi|changed
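The rule|changed and heketi|changed filter syntax used above was deprecated in
later Ansible releases (and removed in 2.9) in favor of the 'is changed' test;
under newer Ansible the save task would read, with unchanged behavior:

    - name: save iptables
      shell: iptables-save > /etc/sysconfig/iptables
      when: rule is changed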
diff --git a/deployment/playbooks/roles/instance-groups/tasks/main.yaml b/deployment/playbooks/roles/instance-groups/tasks/main.yaml
deleted file mode 100644
index f0f3c0f9..00000000
--- a/deployment/playbooks/roles/instance-groups/tasks/main.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
----
-# Map the deprecated 'rhel_subscription_*' vars to 'rhsm_user',
-# 'rhsm_password' and 'rhsm_pool' for compatibility with older configurations
-- name: Set deprecated fact for rhel_subscription_user
- set_fact:
- rhsm_user: "{{ rhel_subscription_user }}"
- when: rhel_subscription_user is defined
-
-- name: Set deprecated fact for rhel_subscription_pass
- set_fact:
- rhsm_password: "{{ rhel_subscription_pass }}"
- when: rhel_subscription_pass is defined
-
-- name: Set deprecated fact for rhel_subscription_pool
- set_fact:
- rhsm_pool: "{{ rhel_subscription_pool }}"
- when: rhel_subscription_pool is defined
-
-- name: Add masters to requisite groups
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, masters, etcd, nodes, cluster_hosts, master, OSEv3
- openshift_node_group_name: "node-config-master{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-master'] }}"
- when:
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add masters to requisite groups
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, masters, etcd, nodes, cluster_hosts, master, OSEv3
- openshift_node_group_name: "node-config-master"
- openshift_node_labels:
- role: master
- node-role.kubernetes.io/master: true
- with_items: "{{ groups[cluster_id + '-master'] }}"
- when:
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add a master to the single master group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: single_master
- openshift_node_group_name: "node-config-master{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-master'][0] }}"
- when:
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add a master to the single master group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: single_master
- openshift_node_group_name: "node-config-master"
- openshift_node_labels:
- role: master
- node-role.kubernetes.io/master: true
- with_items: "{{ groups[cluster_id + '-master'][0] }}"
- when:
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add compute instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute, OSEv3
- openshift_node_group_name: "node-config-compute{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-compute'] }}"
- when:
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add compute instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute, OSEv3
- openshift_node_group_name: "node-config-compute"
- openshift_node_labels:
- role: compute
- node-role.kubernetes.io/compute: true
- node-role.kubernetes.io/infra: true
- with_items: "{{ groups[cluster_id + '-compute'] }}"
- when:
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add new node instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, new_nodes
- openshift_node_group_name: "node-config-compute{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups.tag_provision_node | default([]) }}"
- when:
- - add_node is defined
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add new node instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, new_nodes
- openshift_node_group_name: "node-config-compute"
- openshift_node_labels:
- role: "{{ node_type }}"
- node-role.kubernetes.io/compute: true
- node-role.kubernetes.io/infra: true
- with_items: "{{ groups.tag_provision_node | default([]) }}"
- when:
- - add_node is defined
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add cns instances to allnodes
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, OSEv3
- openshift_node_group_name: "node-config-storage{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-storage'] | default([]) }}"
-
-- name: Add crs instances to allnodes
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, OSEv3
- openshift_node_group_name: "node-config-storage"
- with_items: "{{ groups[cluster_id + '-crs'] | default([]) }}"
-
-- name: Add cns instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: nodes, cluster_hosts, schedulable_nodes, storage
- openshift_node_group_name: "node-config-storage{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-storage'] }}"
- when:
- - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add cns instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: nodes, cluster_hosts, schedulable_nodes, storage
- openshift_node_labels:
- role: storage
- node-role.kubernetes.io/storage: true
- openshift_node_group_name: "node-config-storage"
- with_items: "{{ groups[cluster_id + '-storage'] }}"
- when:
- - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add crs nodes to the storage group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: storage, crs
- openshift_node_group_name: "node-config-storage"
- with_items: "{{ groups[cluster_id + '-crs'] }}"
- when:
- - "'crs' in container_storage and add_node is defined and 'storage' in node_type"
diff --git a/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml b/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
deleted file mode 100644
index de9230d1..00000000
--- a/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Install git
- package:
- name: git
- state: latest
- when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
diff --git a/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
deleted file mode 100644
index cd74c20e..00000000
--- a/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-local_volumes_device: "/dev/sdc"
-local_volumes_fstype: "xfs"
-local_volumes_fsopts: "gquota"
-local_volumes_path: "/var/lib/origin/openshift.local.volumes"
diff --git a/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
deleted file mode 100644
index df58fe80..00000000
--- a/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Create filesystem for /var/lib/origin/openshift.local.volumes
- filesystem:
- fstype: "{{ local_volumes_fstype }}"
- dev: "{{ local_volumes_device }}"
-
-- name: Create local volumes directory
- file:
- path: "{{ local_volumes_path }}"
- state: directory
- recurse: yes
-
-- name: Create fstab entry
- mount:
- name: "{{ local_volumes_path }}"
- src: "{{ local_volumes_device }}"
- fstype: "{{ local_volumes_fstype }}"
- opts: "{{ local_volumes_fsopts }}"
- state: present
-
-- name: Mount fstab entry
- mount:
- name: "{{ local_volumes_path }}"
- src: "{{ local_volumes_device }}"
- fstype: "{{ local_volumes_fstype }}"
- opts: "{{ local_volumes_fsopts }}"
- state: mounted
diff --git a/deployment/playbooks/roles/package-repos/tasks/main.yaml b/deployment/playbooks/roles/package-repos/tasks/main.yaml
deleted file mode 100644
index 3492a9e4..00000000
--- a/deployment/playbooks/roles/package-repos/tasks/main.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Import RHSM role
- import_role:
- name: rhsm
-
-- name: Evaluate OCP repo name
- set_fact:
- tmp_ose_repo_name: "rhel-7-server-ose-3.{{ openshift_vers.split('_')[-1] }}-rpms"
-
-- name: Disable OpenShift 3.X GA repo
- command: "subscription-manager repos --disable={{ tmp_ose_repo_name }}"
- when: (ose_puddle_repo != '') or ('crs' in group_names)
-
-- name: Create additional repo with downstream packages for OpenShift 3.X
- yum_repository:
- name: "downstream-{{ tmp_ose_repo_name }}"
- baseurl: "{{ ose_puddle_repo }}"
- description: "Downstream repo with development versions of packages for OpenShift"
- enabled: "{{ (ose_puddle_repo != '') | ternary('yes', 'no') }}"
- gpgcheck: "no"
- sslverify: "no"
- cost: 900
- when: (ose_puddle_repo != '') and ('crs' not in group_names)
diff --git a/deployment/playbooks/roles/prerequisites/defaults/main.yaml b/deployment/playbooks/roles/prerequisites/defaults/main.yaml
deleted file mode 100644
index 1705ee4f..00000000
--- a/deployment/playbooks/roles/prerequisites/defaults/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-openshift_required_packages:
-- iptables
-- iptables-services
-- NetworkManager
-- docker{{ '-' + docker_version if docker_version is defined else '' }}
diff --git a/deployment/playbooks/roles/prerequisites/library/openshift_facts.py b/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
deleted file mode 120000
index e0061bb7..00000000
--- a/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-/usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
\ No newline at end of file
diff --git a/deployment/playbooks/roles/prerequisites/library/rpm_q.py b/deployment/playbooks/roles/prerequisites/library/rpm_q.py
deleted file mode 100644
index afc261ba..00000000
--- a/deployment/playbooks/roles/prerequisites/library/rpm_q.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2015, Tobias Florek <tob@butter.sh>
-# Licensed under the terms of the MIT License
-"""
-An Ansible module to query the RPM database. For use when yum/dnf are not
-available.
-"""
-
-# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
-from ansible.module_utils.basic import * # noqa: F403
-
-DOCUMENTATION = """
----
-module: rpm_q
-short_description: Query the RPM database
-author: Tobias Florek
-options:
- name:
- description:
- - The name of the package to query
- required: true
- state:
- description:
- - Whether the package is supposed to be installed or not
- choices: [present, absent]
- default: present
-"""
-
-EXAMPLES = """
-- rpm_q: name=ansible state=present
-- rpm_q: name=ansible state=absent
-"""
-
-RPM_BINARY = '/bin/rpm'
-
-
-def main():
- """
- Checks rpm -q for the named package and returns the installed packages
- or None if not installed.
- """
- module = AnsibleModule( # noqa: F405
- argument_spec=dict(
- name=dict(required=True),
- state=dict(default='present', choices=['present', 'absent'])
- ),
- supports_check_mode=True
- )
-
- name = module.params['name']
- state = module.params['state']
-
- # pylint: disable=invalid-name
- rc, out, err = module.run_command([RPM_BINARY, '-q', name])
-
- installed = out.rstrip('\n').split('\n')
-
- if rc != 0:
- if state == 'present':
- module.fail_json(msg="%s is not installed" % name,
- stdout=out, stderr=err, rc=rc)
- else:
- module.exit_json(changed=False)
- elif state == 'present':
- module.exit_json(changed=False, installed_versions=installed)
- else:
- module.fail_json(msg="%s is installed", installed_versions=installed)
-
-
-if __name__ == '__main__':
- main()
diff --git a/deployment/playbooks/roles/prerequisites/tasks/main.yaml b/deployment/playbooks/roles/prerequisites/tasks/main.yaml
deleted file mode 100644
index a2686796..00000000
--- a/deployment/playbooks/roles/prerequisites/tasks/main.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-- name: Gather facts
- openshift_facts:
- role: common
-
-- block:
- - name: Clear yum cache
- command: "yum clean all"
- ignore_errors: true
-
- - name: Install the required rpms
- package:
- name: "{{ item }}"
- state: latest
- with_items: "{{ openshift_required_packages }}"
-
- - name: Start NetworkManager and network
- service:
- name: "{{ item }}"
- state: restarted
- enabled: true
- with_items:
- - NetworkManager
- - network
-
- - name: Determine if firewalld is installed
- rpm_q:
- name: "firewalld"
- state: present
- register: firewalld_installed
- failed_when: false
-
- - name: Stop firewalld
- service:
- name: firewalld
- state: stopped
- enabled: false
- when:
- - "{{ firewalld_installed.installed_versions | default([]) | length > 0 }}"
-
- - name: Start iptables
- service:
- name: iptables
- state: started
- enabled: true
-
- - name: Start docker
- service:
- name: docker
- state: started
- enabled: true
-
- when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
-
-# Fail as early as possible if Atomic and old version of Docker
-- block:
- - name: Determine Atomic Host Docker Version
- shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
- register: l_atomic_docker_version
-
- - assert:
- msg: Installation on Atomic Host requires Docker 1.12 or later. Attempting to patch.
- that:
- - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
-
- rescue:
- - name: Patching Atomic instances
- shell: atomic host upgrade
- register: patched
-
- - name: Reboot when patched
- shell: sleep 5 && shutdown -r now "Reboot due to Atomic Patching"
- async: 1
- poll: 0
- ignore_errors: true
- when: patched.changed
-
- - name: Wait for hosts to be back
- pause:
- seconds: 60
- delegate_to: 127.0.0.1
- when: patched.changed
-
- when: (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
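The Atomic block above wraps the braces in a CURLY shell variable only so that
Ansible's own Jinja templating does not consume docker's --format markers. The
version_compare test it relies on was renamed to 'version' in Ansible 2.5; an
equivalent assert under the newer name would be:

    - assert:
        that:
          - l_atomic_docker_version.stdout | replace('"', '') is version('1.12', '>=')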
diff --git a/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml b/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
deleted file mode 100644
index 9b9f3b21..00000000
--- a/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Is the host already registered?
- command: "subscription-manager list"
- register: subscribed
- ignore_errors: yes
-
- - name: Unregister host
- redhat_subscription:
- state: absent
- when: "'Subscribed' in subscribed.stdout"
- ignore_errors: yes
-
- when: ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/rhsm/defaults/main.yaml b/deployment/playbooks/roles/rhsm/defaults/main.yaml
deleted file mode 100644
index 3207411f..00000000
--- a/deployment/playbooks/roles/rhsm/defaults/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-openshift_required_repos:
-- 'rhel-7-server-rpms'
-- 'rhel-7-server-extras-rpms'
-- 'rhel-7-fast-datapath-rpms'
diff --git a/deployment/playbooks/roles/rhsm/tasks/main.yaml b/deployment/playbooks/roles/rhsm/tasks/main.yaml
deleted file mode 100644
index f793fb2f..00000000
--- a/deployment/playbooks/roles/rhsm/tasks/main.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- block:
- - name: Allow rhsm a longer timeout to help out with subscription-manager
- lineinfile:
- dest: /etc/rhsm/rhsm.conf
- line: 'server_timeout=600'
- insertafter: '^proxy_password ='
-
- - name: Is the system already registered?
- command: "subscription-manager version"
- register: subscribed
-
- - name: Unregister system if registered
- import_role:
- name: rhsm-unregister
- when:
- - "'not registered' not in subscribed.stdout"
-
- - name: Register system using Red Hat Subscription Manager
- redhat_subscription:
- state: present
- username: "{{ rhsm_user | default(omit) }}"
- password: "{{ rhsm_password | default(omit) }}"
- pool: "{{ rhsm_pool | default(omit) }}"
- server_hostname: "{{ rhsm_satellite | default(omit) }}"
- when:
- - "'not registered' in subscribed.stdout"
- - rhsm_user is defined
- - rhsm_user|trim != ''
- register: rhn
- until: rhn|success
- retries: 5
-
- - name: Obtain currently enabled repos
- shell: 'subscription-manager repos --list-enabled | sed -ne "s/^Repo ID:[^a-zA-Z0-9]*\(.*\)/\1/p"'
- register: enabled_repos
-
- - name: Disable repositories that should not be enabled
- shell: "subscription-manager repos --disable={{ item }}"
- with_items:
- - "{{ enabled_repos.stdout_lines | difference(openshift_required_repos) }}"
- when: provider is not defined
-
- - name: Enable specified repositories not already enabled
- command: "subscription-manager repos --enable={{ item }}"
- with_items:
- - "{{ openshift_required_repos | difference(enabled_repos.stdout_lines) }}"
-
- when: ansible_distribution == "RedHat"
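The disable/enable pair above is driven by the difference filter: repos
enabled on the host but absent from openshift_required_repos get disabled, and
required repos not yet enabled get enabled. Illustrative behavior of the
filter, with made-up repo names:

    - debug:
        msg: "{{ ['rhel-7-server-rpms', 'epel'] | difference(['rhel-7-server-rpms']) }}"
      # prints ['epel'], i.e. the repo that would be disabled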
diff --git a/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
deleted file mode 100644
index e9e06809..00000000
--- a/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-# NOTE(vponomar): here we use 2 different sources of IP addresses:
-# 1) hostvars[item].guest.net exists for old nodes that weren't created by
-# this playbook run. Such nodes have detailed info in hostvars.
-# 2) hostvars[item].ansible_ssh_host is always the correct IP address for
-# newly created nodes. We fall back to it when variant 1 is unavailable.
-- name: Save matched hosts to temporary var
- set_fact:
- current_cluster_hosts: "{{
- current_cluster_hosts | default([]) | union([{
- 'name_short': hostvars[item].inventory_hostname_short,
- 'name': hostvars[item].inventory_hostname,
- 'net': (hostvars[item].guest | default({})).net | default(
- [{'network': vm_network,
- 'ipaddress': [hostvars[item].ansible_ssh_host]}])
- }]) }}"
- with_items: "{{ groups.all | select('match', ocp_hostname_prefix) | list }}"
-
-- name: Gather current cluster IP addresses
- set_fact:
- current_cluster_ips: "{{
- current_cluster_ips | default({}) | combine({
- (item.1.ipaddress | ipv4 | first): [item.0.name_short, item.0.name]
- }) }}"
- with_subelements: ["{{ current_cluster_hosts }}", net]
- when: "item.1.network == vm_network"
-
-- name: Get current user home dir
- shell: 'eval echo "~$USER"'
- register: home_dir
-- name: Set hosts files paths
- set_fact:
- home_hosts_file: "{{ home_dir.stdout_lines[0] + '/.ssh/config' }}"
- system_hosts_file: "/etc/hosts"
-- name: Check 'write' permissions for system hosts file
- stat:
- path: "{{ system_hosts_file }}"
- register: stat_system_hosts
-
-- name: Update system hosts file if writeable
- block:
- - name: Delete old left-overs if they exist
- lineinfile:
- dest: "{{ system_hosts_file }}"
- regexp: '{{ item.name_short }}'
- state: absent
- create: true
- with_items: "{{ current_cluster_hosts }}"
- - name: Add domain name mapping of new cluster nodes to the system hosts file
- lineinfile:
- dest: "{{ system_hosts_file }}"
- line: '{{ item.key }} {{ item.value.0 }} {{ item.value.1 }}'
- create: true
- with_dict: "{{ current_cluster_ips }}"
- when: "stat_system_hosts.stat.writeable"
-
-- name: Update user's SSH hosts file
- block:
- - name: Delete old left-overs if they exist
- lineinfile:
- path: "{{ home_hosts_file }}"
- state: absent
- regexp: "{{ item.key }}"
- create: true
- mode: '644'
- with_dict: "{{ current_cluster_ips }}"
- - name: Write line with option group
- lineinfile:
- dest: "{{ home_hosts_file }}"
- state: present
- line: "Host {{ item.value.0 }} {{ item.value.1 }}"
- create: true
- mode: '644'
- with_dict: "{{ current_cluster_ips }}"
- - name: Write line with hostname option
- lineinfile:
- dest: "{{ home_hosts_file }}"
- state: present
- line: " HostName {{ item.key }}"
- insertafter: "Host {{ item.value.0 }} {{ item.value.1 }}"
- create: true
- mode: '644'
- with_dict: "{{ current_cluster_ips }}"
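The three lineinfile tasks at the end build one stanza per node in the user's
~/.ssh/config, so both the short and the full hostname resolve to the right
address. For a node with hypothetical names and IP, the resulting stanza would
be:

    Host node-1 node-1.example.com
     HostName 192.0.2.10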
diff --git a/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
deleted file mode 100644
index d53fa43f..00000000
--- a/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Import role with update of /etc/hosts file
- import_role:
- name: setup-custom-domain-names-for-ansible-runner
-
-- name: Create directory for dnsmasq config file if absent
- file:
- dest: /etc/dnsmasq.d
- state: directory
- mode: 0755
-
-- name: Create custom dnsmasq config file for current cluster
- file:
- dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
- state: touch
-
-- name: Remove stale data from custom dnsmasq config file if it exists
- lineinfile:
- dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
- regexp: "{{ item.value.0 }}"
- state: absent
- with_dict: "{{ current_cluster_ips }}"
-
-- name: Write data to custom dnsmasq config file
- lineinfile:
- dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
- line: "address=/{{ item.value.0 }}/{{ item.key }}\naddress=/{{ item.value.1 }}/{{ item.key }}"
- state: present
- with_dict: "{{ current_cluster_ips }}"
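Each cluster IP produces two address lines in the per-cluster dnsmasq config,
one for the short name and one for the FQDN. With the same hypothetical node
as above, the file would contain:

    address=/node-1/192.0.2.10
    address=/node-1.example.com/192.0.2.10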
diff --git a/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml b/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
deleted file mode 100644
index d42484e0..00000000
--- a/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Copy cloud provider storage class file
- template:
- src: cloud-provider-storage-class.yaml.j2
- dest: ~/cloud-provider-storage-class.yaml
-
-- name: Copy cloud provider storage class file to single master
- fetch:
- src: ~/cloud-provider-storage-class.yaml
- dest: ~/cloud-provider-storage-class.yaml
- flat: yes
-
-- name: Switch to default project
- command: oc project default
-
-- name: Check to see if storage class is already created
- command: "oc get storageclass"
- register: storage_class
-
-- name: Create storage class
- command: "oc create -f ~/cloud-provider-storage-class.yaml"
- when: "'{{ vcenter_datastore }}' not in storage_class.stdout"
diff --git a/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2 b/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
deleted file mode 100644
index e31d53a4..00000000
--- a/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: "{{ vcenter_datastore }}"
-provisioner: kubernetes.io/vsphere-volume
-parameters:
- diskformat: zeroedthick
- datastore: "{{ vcenter_datastore }}"
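Rendered against a hypothetical vcenter_datastore value of "datastore1", the
template above produces:

    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: "datastore1"
    provisioner: kubernetes.io/vsphere-volume
    parameters:
      diskformat: zeroedthick
      datastore: "datastore1"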
diff --git a/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
deleted file mode 100644
index 67898e0c..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart chronyd
- service: name=chronyd state=restarted
-
-- name: restart networking
- service: name=networking state=restarted
diff --git a/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
deleted file mode 100644
index e640b861..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
----
-- name: Determine if Atomic
- stat: path=/run/ostree-booted
- register: s
- changed_when: false
- check_mode: no
-
-- name: Init the is_atomic fact
- set_fact:
- is_atomic: false
-
-- name: Set the is_atomic fact
- set_fact:
- is_atomic: true
- when: s.stat.exists
-
-- block:
- - name: Install 'sos' package
- yum:
- name: sos
- state: installed
- ignore_errors: yes
- - name: be sure all pre-req packages are installed
- yum: name={{item}} state=installed
- with_items:
- - open-vm-tools
- - PyYAML
- - perl
- - python-ipaddress
- - net-tools
- - chrony
- - python-six
- - iptables
- - iptables-services
- - dnsmasq
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- - name: Install docker
- yum: name={{item}} state=installed
- with_items:
- - docker{{ '-' + docker_version if docker_version is defined else '' }}
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- when: not (openshift_use_crio | default(false) | bool)
-
- - name: be sure openvmtools is running and enabled
- service: name=vmtoolsd state=started enabled=yes
- when:
- - not is_atomic | bool
- - ansible_distribution == "RedHat"
-
-- name: be sure chrony is configured
- template: src=chrony.conf.j2 dest=/etc/chrony.conf
- notify:
- - restart chronyd
-
-- name: set link to localtime
- command: timedatectl set-timezone {{ timezone }}
-
-- name: be sure chronyd is running and enabled
- service: name=chronyd state=started enabled=yes
-
-- block:
- - name: (Atomic) Remove extra docker lv from root vg
- lvol:
- lv: docker-pool
- vg: atomicos
- state: absent
- force: yes
- - name: (Atomic) Grow root lv to fill vg
- lvol:
- lv: root
- vg: atomicos
- size: +100%FREE
- - name: (Atomic) Grow root fs to match lv
- filesystem:
- dev: /dev/mapper/atomicos-root
- fstype: xfs
- resizefs: yes
- - name: (Atomic) Force Ansible to re-gather disk facts
- setup:
- filter: 'ansible_mounts'
- when:
- - is_atomic | bool
- - ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2 b/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
deleted file mode 100644
index b8020cb0..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is managed by Ansible
-
-server 0.rhel.pool.ntp.org
-server 1.rhel.pool.ntp.org
-server 2.rhel.pool.ntp.org
-server 3.rhel.pool.ntp.org
-
-driftfile /var/lib/chrony/drift
-makestep 10 3
-
-keyfile /etc/chrony.keys
-commandkey 1
-generatecommandkey
-
-noclientlog
-logchange 0.5
-
-logdir /var/log/chrony
-log measurements statistics tracking
diff --git a/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
deleted file mode 100644
index a951d622..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-locale: en_US.UTF-8
-timezone: UTC
diff --git a/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml b/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
deleted file mode 100644
index 826ff498..00000000
--- a/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# NOTE(vponomar): this role should not be run from nodes
-# which are going to be rebooted.
----
-
-- block:
- - name: Check that hostnames_for_reboot var is set and is not an empty list
- fail:
- msg: "Role 'yum-update-and-reboot' expects 'hostnames_for_reboot' var
- to be set as a list of hostnames which should be rebooted."
- when: "(hostnames_for_reboot is not defined) or hostnames_for_reboot | length < 1"
-
- - name: Run yum_update command
- command: "yum update -y {{ (openshift_vers in ['v3_6', 'v3_7']) |
- ternary('--exclude=*docker*', '') }}"
- delegate_to: "{{ item }}"
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Reboot machine to apply all major system changes, if any exist
- shell: "sleep 3 ; /sbin/shutdown -r now 'Reboot triggered by Ansible'"
- async: 1
- poll: 0
- ignore_errors: true
- delegate_to: "{{ item }}"
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Wait for machine to go down
- wait_for:
- host: "{{ item }}"
- port: 22
- delay: 0
- timeout: 180
- connect_timeout: 5
- state: stopped
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Wait for machine to go up
- wait_for:
- host: "{{ item }}"
- port: 22
- delay: 0
- timeout: 360
- connect_timeout: 5
- state: started
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Sleep for some time to let services start up in time
- shell: "sleep 60"
- when: "disable_yum_update_and_reboot is undefined or not (disable_yum_update_and_reboot | bool)"
diff --git a/deployment/playbooks/scaleup.yaml b/deployment/playbooks/scaleup.yaml
deleted file mode 100644
index 4a21eadc..00000000
--- a/deployment/playbooks/scaleup.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'byo/openshift-node/scaleup.yml',
- 'noop.yaml')
- }}"
-
-- include: "{{ (openshift_vers in ['v3_9']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'openshift-node/scaleup.yml',
- 'noop.yaml')
- }}"
-
-# NOTE(vponomar): following playbooks are what we need from
-# 'playbooks/openshift-node/scaleup.yml' playbook in OCP3.10 and OCP3.11
-# It may be changed for OCP3.11+ versions.
-- include: "{{ (openshift_vers not in ['v3_6', 'v3_7', 'v3_9']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'openshift-node/private/bootstrap.yml',
- 'noop.yaml')
- }}"
-
-- include: "{{ (openshift_vers not in ['v3_6', 'v3_7', 'v3_9']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'openshift-node/private/join.yml',
- 'noop.yaml')
- }}"
diff --git a/deployment/playbooks/setup.yaml b/deployment/playbooks/setup.yaml
deleted file mode 100644
index 2166c2fc..00000000
--- a/deployment/playbooks/setup.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- hosts: localhost
- user: root
- become: false
- vars_files:
- - vars/main.yaml
- tasks:
- - name: "Create resource pool on vCenter"
- vmware_resource_pool:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- cluster: "{{ vcenter_cluster}}"
- resource_pool: "{{ vcenter_resource_pool }}"
- state: "present"
- validate_certs: False
- - name: "Create folder structure on vCenter"
- vmware_folder:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- cluster: "{{ vcenter_cluster}}"
- folder: "{{ vcenter_folder }}"
- state: "present"
- validate_certs: False
diff --git a/deployment/playbooks/vars/main.yaml b/deployment/playbooks/vars/main.yaml
deleted file mode 100644
index 0b5a95af..00000000
--- a/deployment/playbooks/vars/main.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
----
-# OpenShift variables
-openshift_master_cluster_hostname: "{{ lb_host }}"
-openshift_master_cluster_public_hostname: "{{ lb_host }}"
-console_port: 8443
-openshift_vers: "{{ openshift_vers | default('v3_6')}}"
-openshift_major_version: "{{ openshift_vers.split('_')[-1] }}"
-openshift_ansible_branch: release-3.{{ openshift_major_version }}
-openshift_required_repos:
-- rhel-7-server-rpms
-- rhel-7-server-extras-rpms
-- rhel-7-server-ose-3.{{ openshift_major_version }}-rpms
-- rhel-7-fast-datapath-rpms
-openshift_crio_docker_gc_node_selector:
- runtime: crio
-# 'openshift_node_groups' is required for OCP3.10
-openshift_node_groups:
-- name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
-- name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-- name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
-- name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-- name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
-- name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
diff --git a/deployment/scripts/install_openshift_ansible.sh b/deployment/scripts/install_openshift_ansible.sh
deleted file mode 100755
index c258587a..00000000
--- a/deployment/scripts/install_openshift_ansible.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#! /bin/bash
-#
-# List of expected input args:
-# - $1 is an env dir, i.e '/home/username/.../.tox/ocp3.6'
-# - $2 is a tag or PR to checkout from,
-# 1) TAG -> i.e. 'openshift-ansible-3.6.173.0.96-1' for OCP v3.6
-# See list of tags here: https://github.com/openshift/openshift-ansible/tags
-# 2) PR -> 'pull/12345/head'. Where '12345' is ID of a PR.
-# See list of PRs here: https://github.com/openshift/openshift-ansible/pulls
-# Note that PR is checked out, not cherry-picked.
-
-OPENSHIFT_ANSIBLE_GIT_URL='git://github.com/openshift/openshift-ansible.git'
-TARGET_DIR=$1/usr/share/ansible/openshift-ansible
-TAG=$2
-
-if [ -z "$TAG" ]; then
-# NOTE(vponomar): get the latest tag for the given 3.X version
- TAG=$(git ls-remote --tags $OPENSHIFT_ANSIBLE_GIT_URL \
- "refs/tags/openshift-ansible-$(echo $1 | grep -oE '[^tox\/ocp]+$').*" \
- | grep -v "\{\}" | sort -t / -k 3 -V | tail -n 1 | awk '{print $2}' )
- echo "Custom Git tag hasn't been specified, using latest Git tag '$TAG'"
-else
- echo "Using custom Git tag '$TAG'"
-fi
-
-TAG=${TAG/refs\/tags\//}
-
-if [[ ! -d $TARGET_DIR ]]; then
- mkdir -p $TARGET_DIR
- git clone --single-branch $OPENSHIFT_ANSIBLE_GIT_URL $TARGET_DIR
-fi
-
-cd $TARGET_DIR
-git fetch origin $TAG
-git reset --hard FETCH_HEAD
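A sample invocation, reusing the tag from the header comment (the .tox path is
hypothetical):

    ./scripts/install_openshift_ansible.sh ~/cns/.tox/ocp3.6 openshift-ansible-3.6.173.0.96-1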
diff --git a/deployment/scripts/install_yedit_for_ansible.sh b/deployment/scripts/install_yedit_for_ansible.sh
deleted file mode 100755
index ac4b0c44..00000000
--- a/deployment/scripts/install_yedit_for_ansible.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /bin/bash
-#
-# List of expected input args:
-# - $1 is an env dir, i.e '/home/username/.../.tox/ocp3.6'
-# - $2 is a tag or branch name to checkout from.
-
-YEDIT_GIT_URL='git://github.com/vponomaryov/yedit.git'
-TARGET_DIR=$1/src/yedit
-
-if [[ ! -d $TARGET_DIR ]]; then
- mkdir -p $TARGET_DIR
- git clone $YEDIT_GIT_URL --single-branch --branch $2 $TARGET_DIR
-else
- cd $TARGET_DIR
- git fetch -t --all
- git reset --hard $2
-fi
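A sample invocation matching how tox.ini calls the script (the .tox path is
hypothetical):

    ./scripts/install_yedit_for_ansible.sh ~/cns/.tox/ocp3.6 master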
diff --git a/deployment/tox.ini b/deployment/tox.ini
deleted file mode 100644
index cd44bacf..00000000
--- a/deployment/tox.ini
+++ /dev/null
@@ -1,122 +0,0 @@
-# If "pip" is not installed, install it running following command:
-# $ yum install python-pip
-#
-# If "tox" is not installed, install it running following command:
-# $ pip install -e git://github.com/tox-dev/tox.git@2.9.1#egg=tox
-#
-# After it you can use "tox" command. For example:
-# $ tox -e ocp3.7 -- python ocp-on-vmware.py --create_inventory
-
-[tox]
-# The 'skipsdist' config option was added in version 1.6.0. It allows skipping
-# the 'sdist' installation of the current project (no need for a setup.py file).
-minversion = 1.6.0
-skipsdist = True
-sitepackages = False
-envlist = readme
-
-[testenv]
-basepython = python2.7
-envdir = {toxworkdir}/{envname}
-passenv = OPENSHIFT_ANSIBLE_GIT_TAG
-setenv =
- OADIR={envdir}/usr/share/ansible/openshift-ansible
- ANSIBLE_ROLES_PATH={env:OADIR}/roles:{toxinidir}/playbooks/roles
- ANSIBLE_CALLBACK_PLUGINS={env:OADIR}/callback_plugins
- ANSIBLE_FILTER_PLUGINS={env:OADIR}/filter_plugins
- ANSIBLE_LOOKUP_PLUGINS={env:OADIR}/lookup_plugins
- ANSIBLE_LIBRARY={env:OADIR}/roles/etcd_common/library:{env:OADIR}/roles/lib_openshift/library:{env:OADIR}/roles/lib_utils/library:{env:OADIR}/roles/openshift_certificate_expiry/library:{env:OADIR}/roles/openshift_cli/library:{env:OADIR}/roles/openshift_facts/library:{env:OADIR}/roles/openshift_health_checker/library:{env:OADIR}/roles/openshift_logging/library:{env:OADIR}/roles/os_firewall/library:{env:OADIR}/library:{env:OADIR}/roles/etcd/library:{env:OADIR}/roles/lib_os_firewall/library:{env:OADIR}/roles/openshift_sanitize_inventory/library:{envdir}/src/yedit/roles/lib_yaml_editor/library
- ANSIBLE_INVENTORY={toxinidir}/inventory/vsphere/vms/vmware_inventory.py
- ANSIBLE_SSH_ARGS="-C -o ControlMaster=auto -o ControlPersist=60s -F {homedir}/.ssh/config"
-whitelist_externals = *
-commands =
- touch {homedir}/.ssh/config
- python -m pip install --upgrade pip>=9.0.0 setuptools wheel
- pip install \
- cryptography \
- pyyaml \
- dnspython \
- ipaddress \
- ipaddr \
- iptools \
- netaddr \
- pyvmomi \
- click \
- pyOpenSSL \
- passlib \
- Jinja2>=2.8
- bash -ec "yum -y install git libselinux-python || echo 'WARNING! Failed to run yum command. Make sure you have enough rights. Continuing assuming that yum packages are installed.'"
- mkdir -p {envdir}/lib/python2.7/site-packages
- bash -ec "if [ ! -e {envdir}/lib/python2.7/site-packages/selinux ]; then \
- ln -s /usr/lib64/python2.7/site-packages/selinux \
- {envdir}/lib/python2.7/site-packages/selinux ; \
- fi"
- find . -type f -name "*.py[co]" -delete
- {toxinidir}/scripts/install_openshift_ansible.sh \
- {envdir} {env:OPENSHIFT_ANSIBLE_GIT_TAG}
- {toxinidir}/scripts/install_yedit_for_ansible.sh {envdir} master
-
-[testenv:readme]
-commands =
- echo -e 'To create an environment for installation of '\
- 'OpenShift (OCP) 3.11 run the following command:\n\n'\
- ' $ tox -e ocp3.11\n\n'\
- 'or for version 3.10 run the following command:\n\n'\
- ' $ tox -e ocp3.10\n\n'\
- 'or for version 3.9 run the following command:\n\n'\
- ' $ tox -e ocp3.9\n\n'\
- 'or for version 3.7 run the following command:\n\n'\
- ' $ tox -e ocp3.7\n\n'\
- 'or for version 3.6 run the following command:\n\n'\
- ' $ tox -e ocp3.6\n'
-
-[testenv:ocp3.6]
-commands =
- {[testenv]commands}
- {envdir}/bin/pip install \
- -v -e "git://github.com/ansible/ansible.git@v2.4.3.0-1#egg=ansible"
- bash -c "export ANSIBLE_LOG_PATH={toxinidir}/ansible_{envname}_`date +%Y_%m_%d__%H_%M_%S`.log ; {posargs:echo 'No commands have been specified. Exiting.'}"
-setenv =
- {[testenv]setenv}
- OPENSHIFT_ANSIBLE_GIT_TAG={env:OPENSHIFT_ANSIBLE_GIT_TAG:''}
-
-[testenv:ocp3.7]
-commands =
- {[testenv]commands}
- {envdir}/bin/pip install \
- -v -e "git://github.com/ansible/ansible.git@v2.4.3.0-1#egg=ansible"
- bash -c "export ANSIBLE_LOG_PATH={toxinidir}/ansible_{envname}_`date +%Y_%m_%d__%H_%M_%S`.log ; {posargs:echo 'No commands have been specified. Exiting.'}"
-setenv =
- {[testenv]setenv}
- OPENSHIFT_ANSIBLE_GIT_TAG={env:OPENSHIFT_ANSIBLE_GIT_TAG:''}
-
-[testenv:ocp3.9]
-commands =
- {[testenv]commands}
- {envdir}/bin/pip install \
- -v -e "git://github.com/ansible/ansible.git@v2.4.3.0-1#egg=ansible"
- bash -c "export ANSIBLE_LOG_PATH={toxinidir}/ansible_{envname}_`date +%Y_%m_%d__%H_%M_%S`.log ; {posargs:echo 'No commands have been specified. Exiting.'}"
-setenv =
- {[testenv]setenv}
- OPENSHIFT_ANSIBLE_GIT_TAG={env:OPENSHIFT_ANSIBLE_GIT_TAG:''}
-
-[testenv:ocp3.10]
-commands =
- {[testenv]commands}
- {envdir}/bin/pip install \
- -v -e "git://github.com/ansible/ansible.git@v2.4.6.0-1#egg=ansible"
- bash -c "export ANSIBLE_LOG_PATH={toxinidir}/ansible_{envname}_`date +%Y_%m_%d__%H_%M_%S`.log ; {posargs:echo 'No commands have been specified. Exiting.'}"
-setenv =
- {[testenv]setenv}
- OPENSHIFT_ANSIBLE_GIT_TAG={env:OPENSHIFT_ANSIBLE_GIT_TAG:''}
-
-[testenv:ocp3.11]
-commands =
- {[testenv]commands}
- {envdir}/bin/pip install \
- -v -e "git://github.com/ansible/ansible.git@v2.6.2#egg=ansible"
- bash -c "export ANSIBLE_LOG_PATH={toxinidir}/ansible_{envname}_`date +%Y_%m_%d__%H_%M_%S`.log ; {posargs:echo 'No commands have been specified. Exiting.'}"
-
-setenv =
- {[testenv]setenv}
- OPENSHIFT_ANSIBLE_GIT_TAG={env:OPENSHIFT_ANSIBLE_GIT_TAG:''}
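Since OPENSHIFT_ANSIBLE_GIT_TAG is listed in passenv, a specific
openshift-ansible tag can be pinned at invocation time; for example, combining
the tag and the command documented in the header comments above:

    OPENSHIFT_ANSIBLE_GIT_TAG=openshift-ansible-3.6.173.0.96-1 \
        tox -e ocp3.6 -- python ocp-on-vmware.py --create_inventory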