---
# Prepare the CNS (container-native storage) nodes: custom DNS names,
# instance grouping, package repos, VMware guest tooling, cloud provider
# config, Docker storage, volume quota and Gluster firewall ports.
- hosts: cns
  gather_facts: true  # facts are consumed by the setup roles below
  become: false
  vars_files:
    - vars/main.yaml
  roles:
    - setup-custom-domain-names
    - instance-groups
    - package-repos
    - vmware-guest-setup
    - cloud-provider-setup
    - docker-storage-setup
    - openshift-volume-quota
    - gluster-ports
# 'openshift_node_groups' var started being required since OCP3.10
# Define the node group configs (labels + kubelet edits) on every node,
# including the CRI-O variants that point the kubelet at the crio socket.
- hosts: allnodes
  gather_facts: false
  become: false
  tasks:
    - set_fact:
        openshift_crio_docker_gc_node_selector:
          runtime: crio
        openshift_node_groups:
          - name: node-config-master
            labels:
              - 'node-role.kubernetes.io/master=true'
              - 'role=master'
            edits: []
          - name: node-config-master-crio
            labels:
              - 'node-role.kubernetes.io/master=true'
              - 'role=master'
              - 'runtime=cri-o'
            # The CRI-O kubelet edits are identical for master/compute/storage;
            # anchor them once here and alias below — the parsed data is the
            # same as spelling the list out three times.
            edits: &crio_edits
              - key: kubeletArguments.container-runtime
                value: ["remote"]
              - key: kubeletArguments.container-runtime-endpoint
                value: ["/var/run/crio/crio.sock"]
              - key: kubeletArguments.image-service-endpoint
                value: ["/var/run/crio/crio.sock"]
              - key: kubeletArguments.runtime-request-timeout
                value: ["10m"]
          - name: node-config-compute
            labels:
              - 'node-role.kubernetes.io/compute=true'
              - 'node-role.kubernetes.io/infra=true'
              - 'role=compute'
            edits: []
          - name: node-config-compute-crio
            labels:
              - 'node-role.kubernetes.io/compute=true'
              - 'node-role.kubernetes.io/infra=true'
              - 'role=compute'
              - 'runtime=cri-o'
            edits: *crio_edits
          - name: node-config-storage
            labels:
              - 'node-role.kubernetes.io/storage=true'
              - 'role=storage'
            edits: []
          - name: node-config-storage-crio
            labels:
              - 'node-role.kubernetes.io/storage=true'
              - 'role=storage'
              - 'runtime=cri-o'
            edits: *crio_edits
# OCP 3.6/3.7 use the local prerequisite playbook; newer releases run the
# openshift-ansible prerequisites/init playbooks shipped in the virtualenv.
- include: add-node-prerequisite.yaml
  when: openshift_vers in ['v3_6', 'v3_7']
# NOTE(review): the 'noop.yaml' ternary branch is never executed here — the
# 'when' guard already excludes 3.6/3.7; the ternary only keeps the include
# path resolvable for those versions, where the openshift-ansible playbook
# does not exist.
- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
      'noop.yaml',
      lookup('env', 'VIRTUAL_ENV') +
      '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
  ) }} hosts=new_nodes"
  when: openshift_vers not in ['v3_6', 'v3_7']
# Same pattern for the init playbook; 'hosts=new_nodes' limits both runs to
# the nodes being added.
- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
      'noop.yaml',
      lookup('env', 'VIRTUAL_ENV') +
      '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
  ) }} hosts=new_nodes"
  when: openshift_vers not in ['v3_6', 'v3_7']
# Re-run the custom-domain-names role across the whole cluster so old and
# new nodes can resolve each other.
- name: Map domain names and IP addresses of old and new nodes to each other
  hosts: master, compute, crs
  vars_files:
    - vars/main.yaml
  roles:
    - setup-custom-domain-names
# Restart dnsmasq everywhere so the updated host mappings take effect.
- hosts: allnodes
  gather_facts: false
  become: false
  tasks:
    - name: Make sure dnsmasq is running, enabled and restarted
      # Native block-style module args instead of the legacy 'key=value'
      # string form; 'restarted' always bounces the service.
      service:
        name: dnsmasq
        state: restarted
        enabled: true
# Apply package updates (and reboot if the role decides it is needed).
- hosts: localhost
  gather_facts: false
  become: false
  roles:
    - yum-update-and-reboot
# Wait until the API is back after the update/reboot: poll 'oc status'
# every 5s, up to 120 attempts (~10 minutes total).
- hosts: single_master
  gather_facts: false
  tasks:
    - name: Make sure oc client is responsive
      command: oc status
      register: oc_status_result
      until: oc_status_result is succeeded
      retries: 120
      delay: 5