summaryrefslogtreecommitdiffstats
path: root/perf-framework/file_open_test
blob: 83cbe97f029e5ec18e7b058957289b6d9a48c6ab (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
#!/bin/bash
#
# Driver for the file_open perf test: creates and mounts a gluster volume,
# starts stat collection on the client and all bricks, runs the workload,
# then collects and archives the per-run logs.
#
# Set shell options here rather than in the shebang so they still apply
# when the script is invoked as "bash file_open_test".
set -u

# Cluster and test settings (BRICK_IP_ADDRS, MOUNT_POINT, MOUNT_TYPE,
# MGMT_NODE, VOLNAME, SERVER_SCRIPTS_DIR, SERVER_LOG_DIR, LOCAL_LOG_REPO,
# ENABLE_ACL, ENABLE_MEM_ACCT) are defined in this config file.
CONFIG_FILE=gf_perf_config
if [ ! -f "$CONFIG_FILE" ]
then
	echo "Config file $CONFIG_FILE not found. Exiting..." >&2
	exit 1
fi
source "$CONFIG_FILE"

SETTLE_TIME=10			# seconds to let mounts/daemons settle
RUNFILE=.runfile		# persists the next run number across invocations
STAT_COLLECTOR=stat_collect	# stat collection script shipped to each brick

# Determine this run's number and persist the next one.
# First invocation (no runfile yet) is run 1.
if [ ! -f "$RUNFILE" ]
then
	run=1
else
	run=$(cat "$RUNFILE")
fi
echo $((run + 1)) > "$RUNFILE"

# Drop vm caches on all the bricks before starting the runs, so every run
# begins from a cold page cache.
# NOTE: $BRICK_IP_ADDRS is intentionally unquoted -- it is a whitespace-
# separated list that must word-split into individual addresses.
for brick in $BRICK_IP_ADDRS
do
	ssh -l root "$brick" "echo 3 > /proc/sys/vm/drop_caches"
done

# Create the gluster volume
./create_gluster_vol

# Unmount any stale mount of the client mount point.
# grep -w avoids matching a longer path that merely starts with $MOUNT_POINT.
if mount | grep -qw -- "$MOUNT_POINT"
then
	umount "$MOUNT_POINT"
fi

# mkdir -p succeeds whether or not the directory already exists,
# so no existence check is needed.
mkdir -p "$MOUNT_POINT"

# Make sure that the fuse kernel module is loaded
if ! /sbin/lsmod | grep -qw fuse
then
	/sbin/modprobe fuse > /dev/null 2>&1
fi

# Stop any gluster daemons left over from a previous run.
# The old "ps -eaf | egrep" guard always succeeded because it matched the
# egrep process itself; killall is already silenced, so run it directly.
killall glusterfsd glusterd glusterfs > /dev/null 2>&1

# Mount the client
# Sleep for a while. Sometimes, NFS mounts fail if attempted soon after creating the volume
sleep "$SETTLE_TIME"

# Pass "-o acl" only when ACLs are enabled in the config.
if [ "${ENABLE_ACL:-no}" = "yes" ]
then
	acl_opts="-o acl"
else
	acl_opts=""
fi

# NOTE: $acl_opts must stay unquoted below so "-o acl" splits into two
# arguments (and an empty value disappears entirely).
echo "Mounting volume..."
if [ "${ENABLE_MEM_ACCT:-no}" = "yes" ]
then
	echo "Memory accounting status on client -"
	# gdb batch script: dump gf_mem_acct_enable from the running client.
	echo "x/x &gf_mem_acct_enable" > commands.$$
	echo "quit" >> commands.$$
	GLUSTERFS_DISABLE_MEM_ACCT=0 mount -t "$MOUNT_TYPE" $acl_opts "$MGMT_NODE:$VOLNAME" "$MOUNT_POINT"
	mount_status=$?
	# NOTE(review): pidof can return multiple pids if other glusterfs
	# clients are running -- assumes exactly one; verify on shared hosts.
	gdb -q --command=commands.$$ -p $(pidof glusterfs) | grep gf_mem_acct_enable | awk '{print $(NF-1) $NF}'
	rm commands.$$ > /dev/null 2>&1
else
	mount -t "$MOUNT_TYPE" $acl_opts "$MGMT_NODE:$VOLNAME" "$MOUNT_POINT"
	mount_status=$?
fi

if [ "$mount_status" -ne 0 ]
then
	echo "mount -t $MOUNT_TYPE $acl_opts $MGMT_NODE:$VOLNAME $MOUNT_POINT failed..."
	echo "Exiting..."
	exit 1
fi

# Copy the statistics collection script to every brick.

echo ""
echo "Copying stat collection script to bricks..."
for brick in $BRICK_IP_ADDRS
do
	ssh -l root "$brick" "mkdir -p $SERVER_SCRIPTS_DIR"
	scp -p "$STAT_COLLECTOR" "root@$brick:$SERVER_SCRIPTS_DIR" > /dev/null 2>&1
done

# Start the statistics collectors on every brick.

echo ""
echo "Starting server stat collection..."
for brick in $BRICK_IP_ADDRS
do
	ssh -l root "$brick" "mkdir -p $SERVER_LOG_DIR"
	# Backgrounded: runs for the duration of the test and is killed later.
	ssh -l root "$brick" "$SERVER_SCRIPTS_DIR/$STAT_COLLECTOR $SERVER_LOG_DIR" &
done

# Start statistics collection on the client, also in the background.

mkdir -p "$LOCAL_LOG_REPO"
./"$STAT_COLLECTOR" "$LOCAL_LOG_REPO/run$run/client" &

# Start perf test

echo ""
echo "Starting run $run..."

# Workload size is overridable via the environment; defaults match the
# original hard-coded values (50 parallel creators, 10000 files each).
sleep "$SETTLE_TIME"
./parallel_create "${PC_NUM_WORKERS:-50}" "${PC_NUM_FILES:-10000}"
sleep "$SETTLE_TIME"

# Stop statistics collection scripts on the client

killall mpstat vmstat iostat "$STAT_COLLECTOR" sar > /dev/null 2>&1

# Stop statistics collection scripts on the bricks.

echo ""
echo "Stopping server stat collection..."
for brick in $BRICK_IP_ADDRS
do
	ssh -l root "$brick" "killall mpstat vmstat iostat $STAT_COLLECTOR sar" > /dev/null 2>&1
done

# The backgrounded ssh stat-collection sessions are still hanging around;
# reap them. "jobs -p" prints just the PIDs (no awk parsing needed).
kill $(jobs -p) > /dev/null 2>&1

# Copy the collected statistics from each brick into this run's log dump.

echo ""
echo "Copying server logfiles for run $run..."
cur_log_dump_dir="$LOCAL_LOG_REPO/run$run"
mkdir -p "$cur_log_dump_dir"
count=1
for brick in $BRICK_IP_ADDRS
do
	for statf in mpstat vmstat iostat sysinfo sar_netstat
	do
		# The remote path is quoted so the glob expands on the brick,
		# not in the local shell.
		scp "root@$brick:$SERVER_LOG_DIR/*$statf*" "$cur_log_dump_dir/brick$count-$brick-$statf-log" > /dev/null 2>&1
	done
	count=$((count + 1))
done

# Remove this run's statistics from the bricks so the next run starts clean.

echo ""
echo "Cleaning server logfiles..."
for brick in $BRICK_IP_ADDRS
do
	# "&&" (not ";") so rm never runs in the wrong directory if cd fails;
	# -f keeps a missing logfile from producing an error.
	ssh -l root "$brick" "cd $SERVER_LOG_DIR && rm -f mpstat_log vmstat_log iostat_log sysinfo sar_netstat_log"
done