import ddt
import pytest
from glusto.core import Glusto as g
from glustolibs.gluster import block_libs
from openshiftstoragelibs import baseclass
from openshiftstoragelibs import command
from openshiftstoragelibs import heketi_ops
from openshiftstoragelibs import node_ops
from openshiftstoragelibs import openshift_ops
from openshiftstoragelibs import openshift_storage_libs
from openshiftstoragelibs import podcmd


@ddt.ddt
class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
    '''Class that contains dev path mapping test cases for
       gluster file & block volumes
    '''

    def setUp(self):
        super(TestDevPathMapping, self).setUp()
        self.node = self.ocp_master_node[0]
        self.h_node, self.h_server = (
            self.heketi_client_node, self.heketi_server_url)
        h_nodes_list = heketi_ops.heketi_node_list(self.h_node, self.h_server)
        h_node_count = len(h_nodes_list)
        if h_node_count < 3:
            self.skipTest(
                "At least 3 nodes are required, found {}".format(
                    h_node_count))

        # Disable 4th and other nodes
        for node_id in h_nodes_list[3:]:
            self.addCleanup(
                heketi_ops.heketi_node_enable,
                self.h_node, self.h_server, node_id)
            heketi_ops.heketi_node_disable(
                self.h_node, self.h_server, node_id)

        h_info = heketi_ops.heketi_node_info(
            self.h_node, self.h_server, h_nodes_list[0], json=True)
        self.assertTrue(
            h_info, "Failed to get the heketi node info for node id"
            " {}".format(h_nodes_list[0]))
        self.node_ip = h_info['hostnames']['storage'][0]
        self.node_hostname = h_info["hostnames"]["manage"][0]
        self.vm_name = node_ops.find_vm_name_by_ip_or_hostname(
            self.node_hostname)
        self.devices_list = [device['name'] for device in h_info["devices"]]

        # Get list of additional devices for one of the Gluster nodes
        for gluster_server in list(g.config["gluster_servers"].values()):
            if gluster_server['storage'] == self.node_ip:
                additional_device = gluster_server.get("additional_devices")
                if additional_device:
                    self.devices_list.extend(additional_device)

        # sort the devices list
        self.devices_list.sort()

    def _cleanup_heketi_volumes(self, existing_volumes):
        """Cleanup created BHV and BV"""
        volumes = heketi_ops.heketi_volume_list(
            self.h_node, self.h_server, json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes))
        for volume in new_volumes:
            h_vol_info = heketi_ops.heketi_volume_info(
                self.h_node, self.h_server, volume, json=True)
            if h_vol_info.get("block"):
                for block_vol in (
                        h_vol_info.get("blockinfo").get("blockvolume")):
                    heketi_ops.heketi_blockvolume_delete(
                        self.h_node, self.h_server, block_vol,
                        raise_on_error=False)
            heketi_ops.heketi_volume_delete(
                self.h_node, self.h_server, volume, raise_on_error=False)

    @pytest.mark.tier4
    @podcmd.GlustoPod()
    def test_dev_path_block_volume_create(self):
        """Validate dev path mapping for block volumes"""
        pvc_size, pvc_amount = 2, 5
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        self.detach_and_attach_vmdk(
            self.vm_name, self.node_hostname, self.devices_list)
        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)

        # Compare pvs info before and after
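        # The disk detach/attach cycle is expected to shift the device names
        # by one position, so every pvs entry taken before the cycle (except
        # the last one) should line up with the following entry afterwards
        # while keeping the same pv uuid and vg name.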
        for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
                pvs_info_before[:-1], pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected:{},"
                "Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected:"
                "{}, Actual:{}".format(vg_name, _vg_name))

        # Create block volumes
        pvcs = self.create_and_wait_for_pvcs(
            pvc_size=pvc_size, pvc_amount=pvc_amount)
        self.create_dcs_with_pvc(pvcs)
        self.validate_block_volumes_count(
            self.h_node, self.h_server, self.node_ip)

    def _get_space_use_percent_in_app_pod(self, pod_name):
        """Return space use samples from the app pod to check IO is running"""
        use_percent = []
        cmd = "oc exec {} -- df -h /mnt | tail -1"

        # Run 10 times to track the percentage used
        for _ in range(10):
            out = command.cmd_run(cmd.format(pod_name), self.node).split()[3]
            self.assertTrue(
                out, "Failed to fetch mount point details from the pod "
                "{}".format(pod_name))
            use_percent.append(out[:-1])
        return use_percent

    def _create_app_pod_and_verify_pvs(self):
        """Create block volume with app pod and verify IO's. Compare path,
        uuid, vg_name.
        """
        pvc_size, pvc_amount = 2, 1

        # Space to use for io's in KB
        space_to_use = 104857600

        # Create block volumes
        pvc_name = self.create_and_wait_for_pvcs(
            pvc_size=pvc_size, pvc_amount=pvc_amount)

        # Create dcs and app pods with I/O on it
        dc_name = self.create_dcs_with_pvc(
            pvc_name, space_to_use=space_to_use)
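        # create_dcs_with_pvc returns a dict whose values are
        # (dc name, pod name) pairs; a single PVC was created above,
        # so pick the one pod and its dc from the mapping.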
        # Get the pod name
        pod_name = [pod_name for _, pod_name in list(dc_name.values())][0]
        self.assertTrue(
            pod_name, "Failed to get the pod name from {}".format(dc_name))

        # Get the dc name
        dc_name = [dc for dc, _ in list(dc_name.values())][0]
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)

        # Check if IO's are running
        use_percent_before = self._get_space_use_percent_in_app_pod(pod_name)

        # Validate file volumes count
        self.validate_file_volumes_count(
            self.h_node, self.h_server, self.node_ip)
        self.detach_and_attach_vmdk(
            self.vm_name, self.node_hostname, self.devices_list)

        # Check if IO's are still running after the disk re-attach
        use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
        self.assertNotEqual(
            use_percent_before, use_percent_after,
            "Failed to execute IO's in the app pod {}".format(
                pod_name))

        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        # Compare pvs info before and after
        for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
                pvs_info_before[:-1], pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected: {},"
                " Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected: {},"
                " Actual:{}".format(vg_name, _vg_name))
        return pod_name, dc_name, use_percent_before

    @pytest.mark.tier4
    @podcmd.GlustoPod()
    def test_dev_path_mapping_app_pod_with_block_volume_reboot(self):
        """Validate dev path mapping for app pods with block volume after
           reboot
        """
        # Create block volume with app pod and verify IO's
        # and Compare path, uuid, vg_name
        pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()

        # Delete app pods
        openshift_ops.oc_delete(self.node, 'pod', pod_name)
        openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)

        # Wait for the new app pod to come up
        self.assertTrue(
            dc_name, "Failed to get the dc name from {}".format(dc_name))
        pod_name = openshift_ops.get_pod_name_from_dc(self.node, dc_name)
        openshift_ops.wait_for_pod_be_ready(self.node, pod_name)

        # Check if IO's are running after respin of app pod
        use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
        self.assertNotEqual(
            use_percent, use_percent_after,
            "Failed to execute IO's in the app pod {} after respin".format(
                pod_name))

    @pytest.mark.tier4
    @podcmd.GlustoPod()
    def test_dev_path_block_volume_delete(self):
        """Validate deletion of already existing block volumes after
           dev path name changes
        """
        pvc_size, pvc_amount = 2, 5
        pvc_names, gluster_block_list, vol_details = [], [], []

        # Fetch BHV list
        h_bhv_list_before = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()
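        # BHV = block hosting volume, BV = block volume; the list taken
        # before anything is created lets the cleanup handler below delete
        # only the volumes created by this test.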
        # Create storage class
        sc_name = self.create_storage_class()

        # Delete created BHV and BV as cleanup during failures
        self.addCleanup(self._cleanup_heketi_volumes, h_bhv_list_before)

        # Create PVC's
        for _ in range(pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(
                self.node, sc_name, pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(
                openshift_ops.wait_for_resource_absence,
                self.node, 'pvc', pvc_name)
            self.addCleanup(
                openshift_ops.oc_delete, self.node, 'pvc', pvc_name,
                raise_on_absence=False)

        # Wait for PVC's to be bound
        openshift_ops.wait_for_pvc_be_bound(self.node, pvc_names)
        # Get volume name list
        for pvc_name in pvc_names:
            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
            volume_name = openshift_ops.get_vol_names_from_pv(
                self.node, pv_name, vol_type='block')
            vol_details.append(volume_name)
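        # The block volume names collected above are checked again after the
        # PVC deletion, against both the gluster-block listing and heketi's
        # blockvolume list, to confirm the volumes are gone.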
        # Get BHV list after BV creation
        h_bhv_list_after = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()
        self.assertTrue(h_bhv_list_after, "Failed to get the BHV list")

        # Validate BV's count
        self.validate_block_volumes_count(
            self.h_node, self.h_server, self.node_ip)
        # Collect pvs info, detach and re-attach the disks, then collect
        # pvs info again
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        self.detach_and_attach_vmdk(
            self.vm_name, self.node_hostname, self.devices_list)
        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        # Compare pvs info before and after
        for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
                pvs_info_before[:-1], pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected:{},"
                "Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected:"
                "{}, Actual:{}".format(vg_name, _vg_name))
        # Delete created PVC's
        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)

        # Wait for PVC's to get deleted
        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)

        # Collect the gluster-block volumes from each BHV
        for bhv_name in h_bhv_list_after:
            b_list = block_libs.get_block_list(self.node_ip, volname=bhv_name)
            self.assertIsNotNone(
                b_list, "Failed to get gluster block list")
            gluster_block_list.extend(b_list)
        # Get list of block volumes using heketi
        h_blockvol_list = heketi_ops.heketi_blockvolume_list(
            self.h_node, self.h_server, json=True)

        # Validate that the deleted volumes are no longer present
        for vol in vol_details:
            self.assertNotIn(
                vol, gluster_block_list,
                "Failed to delete volume {}".format(vol))
            self.assertNotIn(
                vol, h_blockvol_list['blockvolumes'],
                "Failed to delete blockvolume '{}'".format(vol))