try:
    # Prefer simplejson when available; it is faster and behaves
    # consistently across py2/py3.
    import simplejson as json
except ImportError:
    # Fall back to the stdlib json module.
    import json
import re
import time

from glusto.core import Glusto as g
from glustolibs.gluster.block_ops import block_list
from glustolibs.gluster.heal_libs import is_heal_complete
from glustolibs.gluster.volume_ops import (
    get_volume_status,
    get_volume_list,
    volume_status,
    volume_start,
    volume_stop,
)

from openshiftstoragelibs import exceptions
from openshiftstoragelibs.heketi_ops import heketi_blockvolume_info
from openshiftstoragelibs.openshift_ops import cmd_run_on_gluster_pod_or_node
from openshiftstoragelibs import podcmd
from openshiftstoragelibs import waiter


@podcmd.GlustoPod()
def wait_to_heal_complete(
        vol_name=None, g_node="auto_get_gluster_endpoint",
        timeout=300, wait_step=5):
    """Monitors heal for volumes on gluster

    Args:
        vol_name (str): Name of the gluster volume else default is None and
            will check for all the volumes
        g_node (str): Name of the gluster node else default is
            auto_get_gluster_endpoint
        timeout (int): Time to wait for heal check to complete default is 300
        wait_step (int): Time to trigger heal check command for next iteration
    Raises:
        AssertionError: In case heal is not complete
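    Example:
        # Illustrative usage; assumes a reachable gluster endpoint.
        >>> wait_to_heal_complete(vol_name="testvol", timeout=600)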
    """
    if not vol_name:
        gluster_vol_list = get_volume_list(g_node)
        if not gluster_vol_list:
            raise AssertionError("failed to get gluster volume list")
    else:
        gluster_vol_list = [vol_name]

    _waiter = waiter.Waiter(timeout=timeout, interval=wait_step)
    for gluster_vol in gluster_vol_list:
        for w in _waiter:
            if is_heal_complete(g_node, gluster_vol):
                # NOTE(vponomar): Reset attempts for waiter to avoid redundant
                # sleep equal to 'interval' on the next usage.
                _waiter._attempt = 0
                break

    if w.expired:
        err_msg = ("reached timeout waiting for all the gluster volumes "
                   "to reach the 'healed' state.")
        g.log.error(err_msg)
        raise AssertionError(err_msg)


@podcmd.GlustoPod()
def get_gluster_vol_status(file_vol, is_detail=False):
    """Get Gluster vol status.

    Args:
        file_vol (str): file volume name.
        is_detail (bool): if True, fetch the detailed volume status
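    Returns:
        dict: status data of the given volume, keyed by hosting node.
    Example:
        # Illustrative usage; assumes a reachable gluster endpoint.
        >>> get_gluster_vol_status("testvol", is_detail=True)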
    """
    # Get Gluster vol status, optionally with the 'detail' flag
    options = 'detail' if is_detail else ''
    gluster_volume_status = get_volume_status(
        "auto_get_gluster_endpoint", file_vol, options=options)
    if not gluster_volume_status:
        raise AssertionError("Failed to get volume status for gluster "
                             "volume '%s'" % file_vol)
    if file_vol in gluster_volume_status:
        gluster_volume_status = gluster_volume_status.get(file_vol)
    return gluster_volume_status


@podcmd.GlustoPod()
def get_gluster_vol_hosting_nodes(file_vol):
    """Get Gluster vol hosting nodes.

    Args:
        file_vol (str): file volume name.
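    Returns:
        list: IPs/hostnames of the gluster nodes hosting bricks of the
            volume.
    Example:
        # Illustrative usage; assumes a reachable gluster endpoint.
        >>> get_gluster_vol_hosting_nodes("testvol")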
    """
    vol_status = get_gluster_vol_status(file_vol)
    g_nodes = []
    for g_node, g_node_data in vol_status.items():
        for process_name, process_data in g_node_data.items():
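            # Brick entries are keyed by their brick path ('/var/...');
            # skip non-brick entries such as the self-heal daemon.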
            if not process_name.startswith("/var"):
                continue
            g_nodes.append(g_node)
    return g_nodes


@podcmd.GlustoPod()
def restart_gluster_vol_brick_processes(ocp_client_node, file_vol,
                                        gluster_nodes):
    """Restarts brick process of a file volume.

    Args:
        ocp_client_node (str): Node to execute OCP commands on.
        file_vol (str): file volume name.
        gluster_nodes (str/list): One or several IPv4 addresses of Gluster
            nodes, where 'file_vol' brick processes must be recreated.
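    Example:
        # Illustrative usage; the node name and IP are placeholders.
        >>> restart_gluster_vol_brick_processes(
        ...     "ocp-client-node", "testvol", ["10.70.46.11"])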
    """
    if not isinstance(gluster_nodes, (list, set, tuple)):
        gluster_nodes = [gluster_nodes]

    # Get Gluster vol brick PIDs
    gluster_volume_status = get_gluster_vol_status(file_vol)
    pids = []
    for gluster_node in gluster_nodes:
        pid = None
        for g_node, g_node_data in gluster_volume_status.items():
            if g_node != gluster_node:
                continue
            for process_name, process_data in g_node_data.items():
                if not process_name.startswith("/var"):
                    continue
                pid = process_data["pid"]
                # When a brick is down, its PID is reported as -1, which
                # is an unexpected situation here, so assert on it
                # explicitly.
                assert pid != "-1", (
                    "Got unexpected PID (-1) for '%s' gluster vol on '%s' "
                    "node." % (file_vol, gluster_node))
        assert pid, ("Could not find 'pid' in Gluster vol data for '%s' "
                     "Gluster node. Data: %s" % (
                         gluster_node, gluster_volume_status))
        pids.append((gluster_node, pid))

    # Restart Gluster vol brick processes using found PIDs
    for gluster_node, pid in pids:
        cmd = "kill -9 %s" % pid
        cmd_run_on_gluster_pod_or_node(ocp_client_node, cmd, gluster_node)

    # Wait for Gluster vol brick processes to be recreated
    for gluster_node, pid in pids:
        killed_pid_cmd = (
            "ps -eaf | grep %s | grep -v grep | awk '{print $2}'" % pid)
        _waiter = waiter.Waiter(timeout=60, interval=2)
        for w in _waiter:
            result = cmd_run_on_gluster_pod_or_node(
                ocp_client_node, killed_pid_cmd, gluster_node)
            if result.strip() == pid:
                continue
            g.log.info("Brick process '%s' was killed successfully on '%s'" % (
                pid, gluster_node))
            break
        if w.expired:
            error_msg = ("Process ID '%s' still exists on '%s' after waiting "
                         "for it 60 seconds to get killed." % (
                             pid, gluster_node))
            g.log.error(error_msg)
            raise exceptions.ExecutionError(error_msg)

    # Start volume after gluster vol brick processes recreation
    ret, out, err = volume_start(
        "auto_get_gluster_endpoint", file_vol, force=True)
    if ret != 0:
        err_msg = "Failed to start gluster volume %s on %s. error: %s" % (
            file_vol, gluster_node, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)


@podcmd.GlustoPod()
def restart_file_volume(file_vol, sleep_time=120):
    """Restart file volume (stop and start volume).

    Args:
        file_vol (str): name of a file volume
        sleep_time (int): seconds to wait between the volume stop and start
            operations. Defaults to 120.
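    Example:
        # Illustrative usage; assumes a reachable gluster endpoint.
        >>> restart_file_volume("testvol", sleep_time=60)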
    """
    gluster_volume_status = get_volume_status(
        "auto_get_gluster_endpoint", file_vol)
    if not gluster_volume_status:
        raise AssertionError("failed to get gluster volume status")

    g.log.info("Gluster volume %s status:\n%s" % (
        file_vol, gluster_volume_status))

    ret, out, err = volume_stop("auto_get_gluster_endpoint", file_vol)
    if ret != 0:
        err_msg = "Failed to stop gluster volume %s. error: %s" % (
            file_vol, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

    # Explicit wait of 'sleep_time' seconds for I/O and PVC creation to stop
    time.sleep(sleep_time)

    ret, out, err = volume_start(
        "auto_get_gluster_endpoint", file_vol, force=True)
    if ret != 0:
        err_msg = "failed to start gluster volume %s error: %s" % (
            file_vol, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

    ret, out, err = volume_status("auto_get_gluster_endpoint", file_vol)
    if ret != 0:
        err_msg = ("Failed to get status for gluster volume %s error: %s" % (
            file_vol, err))
        g.log.error(err_msg)
        raise AssertionError(err_msg)


@podcmd.GlustoPod()
def match_heketi_and_gluster_block_volumes_by_prefix(
        heketi_block_volumes, block_vol_prefix):
    """Match block volumes from heketi and gluster. This function can't
       be used for block volumes with custom prefixes

    Args:
        heketi_block_volumes (list): list of heketi block volumes with
                                     which gluster block volumes need to
                                     be matched
        block_vol_prefix (str): block volume prefix by which the block
                                volumes need to be filtered
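    Example:
        # Illustrative usage; 'heketi_vols' is a placeholder list of
        # block volume IDs fetched from heketi.
        >>> match_heketi_and_gluster_block_volumes_by_prefix(
        ...     heketi_vols, "blockvol_")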
    """
    gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")

    gluster_vol_block_list = []
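    # NOTE: the first volume in the list is skipped; on a standard setup it
    # is expected to be the heketi DB volume, which hosts no block volumes.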
    for gluster_vol in gluster_vol_list[1:]:
        ret, out, err = block_list("auto_get_gluster_endpoint", gluster_vol)
        try:
            if ret != 0 and json.loads(out)["RESULT"] == "FAIL":
                msg = "failed to get block volume list with error: %s" % err
                g.log.error(msg)
                raise AssertionError(msg)
        except Exception as e:
            g.log.error(e)
            raise

        gluster_vol_block_list.extend([
            block_vol.replace(block_vol_prefix, "")
            for block_vol in json.loads(out)["blocks"]
            if block_vol.startswith(block_vol_prefix)
        ])

    vol_difference = set(gluster_vol_block_list) ^ set(heketi_block_volumes)
    if vol_difference:
        err_msg = "Gluster and Heketi Block volume list match failed"
        err_msg += "\nGluster Volumes: %s, " % gluster_vol_block_list
        err_msg += "\nBlock volumes %s" % heketi_block_volumes
        err_msg += "\nDifference: %s" % vol_difference
        raise AssertionError(err_msg)


@podcmd.GlustoPod()
def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
                                  block_volume, gluster_node=None,
                                  ocp_client_node=None):
    """Returns block hosting volume name of given block volume

    Args:
        heketi_client_node (str): Node on which cmd has to be executed.
        heketi_server_url (str): Heketi server url
        block_volume (str): block volume for which the block hosting
                            volume is returned
    Kwargs:
        gluster_node (str): gluster node/pod ip where gluster command can be
                            run
        ocp_client_node (str): OCP client node where oc commands can be run
    Returns:
        str : Name of the block hosting volume for given block volume
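    Example:
        # Illustrative usage; the node name, URL and volume ID are
        # placeholders.
        >>> get_block_hosting_volume_name(
        ...     "heketi-client-node", "http://heketi-server:8080",
        ...     "blockvolume-id")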
    """
    block_vol_info = heketi_blockvolume_info(
        heketi_client_node, heketi_server_url, block_volume
    )

    for line in block_vol_info.splitlines():
        block_hosting_vol_match = re.search(
            "^Block Hosting Volume: (.*)$", line
        )

        if not block_hosting_vol_match:
            continue

        if gluster_node and ocp_client_node:
            cmd = 'gluster volume list'
            gluster_vol_list = cmd_run_on_gluster_pod_or_node(
                ocp_client_node, cmd, gluster_node).split('\n')
        else:
            gluster_vol_list = get_volume_list('auto_get_gluster_endpoint')

        for vol in gluster_vol_list:
            if block_hosting_vol_match.group(1).strip() in vol:
                return vol


@podcmd.GlustoPod()
def match_heketi_and_gluster_volumes_by_prefix(heketi_volumes, prefix):
    """Match volumes from heketi and gluster using given volume name prefix

    Args:
        heketi_volumes (list): List of heketi volumes with which gluster
                               volumes need to be matched
        prefix (str): Volume prefix by which the volumes need to be filtered
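    Example:
        # Illustrative usage; 'heketi_vols' is a placeholder list of
        # heketi volume IDs.
        >>> match_heketi_and_gluster_volumes_by_prefix(heketi_vols, "vol_")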
    """
    g_vol_list = get_volume_list("auto_get_gluster_endpoint")
    g_volumes = [
        g_vol.replace(prefix, "")
        for g_vol in g_vol_list if g_vol.startswith(prefix)]

    vol_difference = set(heketi_volumes) ^ set(g_volumes)
    err_msg = ("Heketi and Gluster volume list match failed"
               "Heketi volumes: {}, Gluster Volumes: {},"
               "Difference: {}"
               .format(heketi_volumes, g_volumes, vol_difference))
    assert not vol_difference, err_msg


@podcmd.GlustoPod()
def get_gluster_vol_free_inodes_with_hosts_of_bricks(vol_name):
    """Get the inodes of gluster volume

    Args:
        vol_name (str): Name of the gluster volume
    Returns:
        dict: host IP mapped to a dict of brick processes and their free
            inodes
    Example:
        >>> get_gluster_vol_free_inodes_with_hosts_of_bricks('testvol')
            {   node_ip1:{
                    'brick_process1':'free_inodes',
                    'brick_process2':'free_inodes'},
                node_ip2:{
                    'brick_process1':'free_inodes',
                    'brick_process2':'free_inodes'},
            }
    """
    hosts_with_inodes_info = dict()

    # Get the detailed status of volume
    vol_status = get_gluster_vol_status(vol_name, is_detail=True)

    # Fetch the node ip, brick processes and free inodes from the status
    for g_node, g_node_data in vol_status.items():
        for brick_process, process_data in g_node_data.items():
            if not brick_process.startswith("/var"):
                continue
            if g_node not in hosts_with_inodes_info:
                hosts_with_inodes_info[g_node] = dict()
            inodes_info = {brick_process: process_data["inodesFree"]}
            hosts_with_inodes_info[g_node].update(inodes_info)
    return hosts_with_inodes_info