import ddt

from cnslibs.cns import cns_baseclass
from cnslibs.common import heketi_ops
from cnslibs.common.openshift_ops import (
    get_gluster_vol_info_by_pvc_name,
    get_ocp_gluster_pod_names,
    oc_create_pvc,
    oc_create_tiny_pod_with_volume,
    oc_delete,
    resize_pvc,
    verify_pvc_size,
    verify_pvc_status_is_bound,
    wait_for_pod_be_ready,
    wait_for_resource_absence,
)


@ddt.ddt
class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
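    """Test creation, expansion and deletion of GlusterFS arbiter volumes
    provisioned via OCP PVCs.
    """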

    def setUp(self):
        super(TestArbiterVolumeCreateExpandDelete, self).setUp()

        self.node = self.ocp_master_node[0]
        self.sc = self.cns_storage_class.get(
            'storage_class1', self.cns_storage_class.get('file_storage_class'))
        self.heketi_server_url = self.sc['resturl']

        # Mark one of the Heketi nodes as arbiter-supported if none of the
        # existing nodes or devices is already tagged to support it.
        arbiter_tags = ('required', 'supported')
        arbiter_already_supported = False

        self.node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

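        # Walk the nodes and their devices looking for an existing arbiter
        # tag; the for/else construct below breaks out of both loops as
        # soon as one is found.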
        for node_id in self.node_id_list[::-1]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
                arbiter_already_supported = True
                break
            for device in node_info['devices'][::-1]:
                if device.get('tags', {}).get('arbiter') in arbiter_tags:
                    arbiter_already_supported = True
                    break
            else:
                continue
            break
        if not arbiter_already_supported:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'node', self.node_id_list[0], 'supported')

    def _set_arbiter_tag_with_further_revert(self, node, server_url,
                                             source, source_id, tag_value,
                                             revert_to=None):
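        """Set (or, when 'tag_value' is None, remove) the arbiter tag on a
        Heketi node or device and register a cleanup which reverts the tag
        to 'revert_to' (or just removes it) after the test.
        """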
        if tag_value is None:
            # Remove arbiter tag logic
            heketi_ops.rm_arbiter_tag(node, server_url, source, source_id)
            if revert_to is not None:
                self.addCleanup(heketi_ops.set_arbiter_tag,
                                node, server_url, source, source_id, revert_to)
        else:
            # Add arbiter tag logic
            heketi_ops.set_arbiter_tag(
                node, server_url, source, source_id, tag_value)
            if revert_to is not None:
                self.addCleanup(heketi_ops.set_arbiter_tag,
                                node, server_url, source, source_id, revert_to)
            else:
                self.addCleanup(heketi_ops.rm_arbiter_tag,
                                node, server_url, source, source_id)

    def verify_amount_and_proportion_of_arbiter_and_data_bricks(
            self, vol_info, arbiter_bricks=1, data_bricks=2):
        """Verify the amount and proportion of arbiter and data bricks."""
        bricks_list = vol_info['bricks']['brick']
        bricks = {
            'arbiter_list': [],
            'data_list': [],
            'arbiter_amount': 0,
            'data_amount': 0
        }

        for brick in bricks_list:
            if int(brick['isArbiter']) == 1:
                bricks['arbiter_list'].append(brick)
            else:
                bricks['data_list'].append(brick)

        bricks['arbiter_amount'] = len(bricks['arbiter_list'])
        bricks['data_amount'] = len(bricks['data_list'])

        self.assertGreaterEqual(
            bricks['arbiter_amount'], arbiter_bricks,
            "Arbiter brick amount is expected to be greater than or equal "
            "to %s. Actual amount is '%s'." % (
                arbiter_bricks, bricks['arbiter_amount']))

        self.assertGreaterEqual(
            bricks['data_amount'], data_bricks,
            "Data brick amount is expected to be greater than or equal "
            "to %s. Actual amount is '%s'." % (
                data_bricks, bricks['data_amount']))

        self.assertEqual(
            bricks['data_amount'],
            (bricks['arbiter_amount'] * 2),
            "Expected 1 arbiter brick per 2 data bricks. "
            "Arbiter brick amount is '%s', Data brick amount is '%s'." % (
                bricks['arbiter_amount'], bricks['data_amount'])
        )

        return bricks

    def test_arbiter_pvc_create(self):
        """Test case CNS-944"""

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        # Get vol info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)

    def test_arbiter_pvc_mount_on_pod(self):
        """Test case CNS-945"""

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        # Create POD with attached volume
        mount_path = "/mnt"
        pod_name = oc_create_tiny_pod_with_volume(
            self.node, self.pvc_name, "test-arbiter-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(oc_delete, self.node, 'pod', pod_name)

        # Wait for the POD to be up and running
        wait_for_pod_be_ready(self.node, pod_name, timeout=60, wait_step=2)

        # Get volume ID
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
        vol_id = vol_info["gluster_vol_id"]

        # Verify that POD has volume mounted on it
        cmd = "oc exec {0} -- df -PT {1} | grep {1}".format(
            pod_name, mount_path)
        out = self.cmd_run(cmd)
        err_msg = ("Failed to get info about mounted '%s' volume. "
                   "Output is empty." % vol_id)
        self.assertTrue(out, err_msg)

        # Verify volume data on POD
        # Filesystem  Type           Size    Used  Avail   Cap Mounted on
        # IP:vol_id   fuse.glusterfs 1038336 33408 1004928  3% /mnt
        data = [s for s in out.strip().split(' ') if s]
        actual_vol_id = data[0].split(':')[-1]
        self.assertEqual(
            vol_id, actual_vol_id,
            "Volume ID does not match: expected is "
            "'%s' and actual is '%s'." % (vol_id, actual_vol_id))
        self.assertIn(
            "gluster", data[1],
            "Filesystem type is expected to be of 'glusterfs' type. "
            "Actual value is '%s'." % data[1])
        self.assertEqual(
            mount_path, data[6],
            "Unexpected mount path. Expected is '%s' and actual is '%s'." % (
                mount_path, data[6]))
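        # df reports sizes in 1 KiB blocks here, so 1024 ** 2 KiB
        # corresponds to the default 1 GiB PVC size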
        max_size = 1024 ** 2
        total_size = int(data[2])
        self.assertLessEqual(
            total_size, max_size,
            "Volume has bigger size '%s' than expected - '%s'." % (
                total_size, max_size))
        min_available_size = int(max_size * 0.93)
        available_size = int(data[4])
        self.assertLessEqual(
            min_available_size, available_size,
            "Minimum available size (%s) not satisfied. Actual is '%s'." % (
                min_available_size, available_size))

        # Write data on mounted volume
        write_data_cmd = (
            "dd if=/dev/zero of=%s/file bs=%s count=1" % (
                mount_path, available_size))
        self.cmd_run(write_data_cmd)

    def test_create_arbiter_vol_with_more_than_one_brick_set(self):
        """Test case CNS-942"""

        # Set arbiter:disabled tag to the data devices and get their info
        data_nodes = []
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)

            if len(node_info['devices']) < 2:
                self.skipTest(
                    "Nodes are expected to have at least 2 devices")
            if not all([int(d['storage']['free']) > (3 * 1024**2)
                        for d in node_info['devices'][0:2]]):
                self.skipTest(
                    "Devices are expected to have more than 3Gb of free space")
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url,
                    'device', device['id'], 'disabled',
                    device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'node', node_id, 'disabled',
                node_info.get('tags', {}).get('arbiter'))

            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'node', node_id, 'required',
                node_info.get('tags', {}).get('arbiter'))
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url,
                    'device', device['id'], 'required',
                    device.get('tags', {}).get('arbiter'))

        # Take the smaller of the two data nodes' biggest free device
        # spaces and exceed it by 1KB, so the target volume cannot fit
        # into a single brick set.
        for i, node_info in enumerate(data_nodes):
            biggest_disk_free_space = 0
            for device in node_info['devices'][0:2]:
                free = int(device['storage']['free'])
                if free > biggest_disk_free_space:
                    biggest_disk_free_space = free
            data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
        target_vol_size_kb = 1 + min([
            n['biggest_free_space'] for n in data_nodes])

        # Check that all the data devices have at least half of the
        # required size free
        all_big_enough = True
        for node_info in data_nodes:
            for device in node_info['devices'][0:2]:
                if float(device['storage']['free']) < (target_vol_size_kb / 2):
                    all_big_enough = False
                    break

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create a helper arbiter vol if not all the data devices have
        # half of the required free space.
        if not all_big_enough:
            helper_vol_size_kb, target_vol_size_kb = 0, 0
            smaller_device_id = None
            for node_info in data_nodes:
                devices = node_info['devices']
                if ((devices[0]['storage']['free']) > (
                        devices[1]['storage']['free'])):
                    smaller_device_id = devices[1]['id']
                    smaller_device = devices[1]['storage']['free']
                    bigger_device = devices[0]['storage']['free']
                else:
                    smaller_device_id = devices[0]['id']
                    smaller_device = devices[0]['storage']['free']
                    bigger_device = devices[1]['storage']['free']
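                # Size the helper volume so that, once it is carved out of
                # the bigger device, the remaining free space there drops
                # below twice the smaller device's free space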
                diff = bigger_device - (2 * smaller_device) + 1
                if diff > helper_vol_size_kb:
                    helper_vol_size_kb = diff
                    target_vol_size_kb = bigger_device - diff

            # Disable smaller device and create helper vol on bigger one
            # to reduce its size, then enable smaller device back.
            try:
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, self.heketi_server_url,
                    smaller_device_id)
                self.assertTrue(out)
                self.create_and_wait_for_pvc(
                    int(helper_vol_size_kb / 1024.0**2) + 1)
            finally:
                out = heketi_ops.heketi_device_enable(
                    self.heketi_client_node, self.heketi_server_url,
                    smaller_device_id)
                self.assertTrue(out)

        # Create target arbiter volume
        self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

        # Get gluster volume info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)

    # NOTE(vponomar): do not create big volumes with 'avg_file_size' set
    # to a value lower than 64. It would cause creation of a huge number
    # of files, making a single test run take very long.
    @ddt.data(
        (2, 0),  # noqa: equivalent of 64KB of avg size
        (1, 4),
        (2, 64),
        (3, 128),
        (3, 256),
        (5, 512),
        (5, 1024),
        (5, 10240),
        (10, 1024000),
    )
    @ddt.unpack
    def test_verify_arbiter_brick_able_to_contain_expected_amount_of_files(
            self, pvc_size_gb, avg_file_size):
        """Test cases CNS-1182-1190"""

        # Create sc with gluster arbiter info
        self.create_storage_class(
            is_arbiter_vol=True, arbiter_avg_file_size=avg_file_size)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc(pvc_size_gb)

        # Get volume info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        # Verify proportion of data and arbiter bricks
        bricks_info = (
            self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                vol_info))

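        # Arbiter bricks store metadata only, so their capacity is planned
        # from the expected file count: volume size divided by the average
        # file size (64KB by default), spread across the arbiter bricks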
        expected_file_amount = pvc_size_gb * 1024**2 // (avg_file_size or 64)
        expected_file_amount = (expected_file_amount //
                                bricks_info['arbiter_amount'])

        # Try to create expected amount of files on arbiter brick mount
        passed_arbiter_bricks = []
        not_found = "Mount Not Found"
        gluster_pods = get_ocp_gluster_pod_names(self.node)
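        # For each arbiter brick, find the gluster pod which hosts it and
        # create the expected amount of files on its mount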
        for brick in bricks_info['arbiter_list']:
            # "brick path" looks like the following:
            # ip_addr:/path/to/vg/brick_unique_name/brick
            # So, remove the "ip_addr" and "/brick" parts to get the
            # mount path
            brick_path = brick["name"].split(":")[-1]
            for gluster_pod in gluster_pods:
                cmd = "oc exec %s -- mount | grep %s || echo '%s'" % (
                    gluster_pod, brick_path[0:-6], not_found)
                out = self.cmd_run(cmd)
                if out != not_found:
                    cmd = (
                        "oc exec %s -- python -c \"["
                        "    open('%s/foo_file{0}'.format(i), 'a').close()"
                        "    for i in range(%s)"
                        "]\"" % (gluster_pod, brick_path, expected_file_amount)
                    )
                    out = self.cmd_run(cmd)
                    passed_arbiter_bricks.append(brick_path)
                    break

        # Make sure all the arbiter bricks were checked
        for brick in bricks_info['arbiter_list']:
            brick_path = brick["name"].split(":")[-1]
            self.assertIn(
                brick_path, passed_arbiter_bricks,
                "Arbiter brick '%s' was not verified. Looks like it was "
                "not found on any of the gluster nodes." % brick_path)

    @ddt.data(True, False)
    def test_arbiter_required_tag_on_node_or_devices_other_disabled(
            self, node_with_tag):
        """Test cases CNS-989 and CNS-997"""

        pvc_amount = 3

        # Get Heketi nodes info
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        # Set arbiter:required tags
        arbiter_node = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url, node_id_list[0],
            json=True)
        arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'node',
            node_id_list[0], ('required' if node_with_tag else None),
            revert_to=arbiter_node.get('tags', {}).get('arbiter'))
        for device in arbiter_node['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'device',
                device['id'], (None if node_with_tag else 'required'),
                revert_to=device.get('tags', {}).get('arbiter'))

        # Set arbiter:disabled tags
        data_nodes, data_nodes_ip_addresses = [], []
        for node_id in node_id_list[1:]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
                        for d in node_info['devices']]):
                self.skipTest(
                    "Devices are expected to have more than "
                    "%sGb of free space" % pvc_amount)
            data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], (None if node_with_tag else 'disabled'),
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, ('disabled' if node_with_tag else None),
                revert_to=node_info.get('tags', {}).get('arbiter'))
            data_nodes.append(node_info)

        # Create PVCs and check that their bricks are correctly located
        self.create_storage_class(is_arbiter_vol=True)
        for i in range(pvc_amount):
            self.create_and_wait_for_pvc(1)

            # Get gluster volume info
            vol_info = get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            arbiter_bricks, data_bricks = [], []
            for brick in vol_info['bricks']['brick']:
                if int(brick["isArbiter"]) == 1:
                    arbiter_bricks.append(brick["name"])
                else:
                    data_bricks.append(brick["name"])

            # Verify that all the arbiter bricks are located on
            # arbiter:required node and data bricks on all other nodes only.
            for arbiter_brick in arbiter_bricks:
                self.assertIn(
                    arbiter_brick.split(':')[0], arbiter_nodes_ip_addresses)
            for data_brick in data_bricks:
                self.assertIn(
                    data_brick.split(':')[0], data_nodes_ip_addresses)

    def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
        """Test case CNS-1265"""
        min_storage_gb = 10

        # Set arbiter:disabled tags to the first 2 nodes
        data_nodes = []
        biggest_disks = []
        self.assertGreater(len(self.node_id_list), 2)
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            biggest_disk_free_space = 0
            for device in node_info['devices']:
                disk_free_space = int(device['storage']['free'])
                if disk_free_space < (min_storage_gb * 1024**2):
                    self.skipTest(
                        "Devices are expected to have more than "
                        "%sGb of free space" % min_storage_gb)
                if disk_free_space > biggest_disk_free_space:
                    biggest_disk_free_space = disk_free_space
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'disabled',
                    revert_to=device.get('tags', {}).get('arbiter'))
            biggest_disks.append(biggest_disk_free_space)
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        arbiter_nodes = []
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'required',
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'required',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            arbiter_nodes.append(node_info)

        # Calculate size and amount of volumes to be created
        pvc_size = int(min(biggest_disks) / 1024**2)
        pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1
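        # With one more PVC than devices per data node, at least one of the
        # volume creations below can succeed only by reusing space released
        # by a previously deleted volume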

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create and delete 3 small volumes concurrently
        pvc_names = []
        for i in range(3):
            pvc_name = oc_create_pvc(
                self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
                pvc_size=int(pvc_size / 3))
            pvc_names.append(pvc_name)
        for pvc_name in pvc_names:
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception:
                # Register cleanups for the whole batch before re-raising:
                # being LIFO, deletions will run first, absence checks last
                for pvc_to_clean in pvc_names:
                    self.addCleanup(
                        wait_for_resource_absence, self.node, 'pvc',
                        pvc_to_clean)
                for pvc_to_clean in pvc_names:
                    self.addCleanup(
                        oc_delete, self.node, 'pvc', pvc_to_clean)
                raise
        for pvc_name in pvc_names:
            oc_delete(self.node, 'pvc', pvc_name)
        for pvc_name in pvc_names:
            wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Create and delete big volumes in a loop
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
                pvc_size=pvc_size)
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception:
                self.addCleanup(
                    wait_for_resource_absence, self.node, 'pvc', pvc_name)
                self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
                raise
            oc_delete(self.node, 'pvc', pvc_name)
            wait_for_resource_absence(self.node, 'pvc', pvc_name)

    def test_arbiter_volume_expand_using_pvc(self):
        """Test case CNS-954"""
        # Create sc with gluster arbiter info
        self.create_storage_class(
            is_arbiter_vol=True, allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        # Get vol info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)

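        # Expand the PVC to 2Gi and verify that the volume grows by a
        # second brick set (2 arbiter and 4 data bricks in total)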
        pvc_size = 2
        resize_pvc(self.node, self.pvc_name, pvc_size)
        verify_pvc_size(self.node, self.pvc_name, pvc_size)

        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)