path: root/tests/basic/gfapi/glfs_xreaddirplus_r.c
blob: 17f4d1b1d5779b0593a50f4c4ecf0416f3b91add (plain)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>
#include <time.h>

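/* Log the failing call (with errno) and jump to the given cleanup label. */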
#define VALIDATE_AND_GOTO_LABEL_ON_ERROR(func, ret, label) do { \
        if (ret < 0) {            \
                fprintf (stderr, "%s : returned error %d (%s)\n", \
                         func, ret, strerror (errno)); \
                goto label; \
        } \
        } while (0)

#define VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR(func, bool_var, ret, label) do { \
        if (!bool_var) {            \
                fprintf (stderr, "%s : returned error (%s)\n", \
                         func, strerror (errno)); \
                ret = -1;                         \
                goto label; \
        } \
        } while (0)

#define MAX_FILES_CREATE 10
#define MAXPATHNAME      512

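/*
 * Accumulate the elapsed time between ts_st and ts_ed into *ts,
 * keeping tv_nsec normalised to below one second.
 */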
void
assimilatetime (struct timespec *ts, struct timespec ts_st,
                struct timespec ts_ed)
{
        if ((ts_ed.tv_nsec - ts_st.tv_nsec) < 0) {
                ts->tv_sec += ts_ed.tv_sec - ts_st.tv_sec - 1;
                ts->tv_nsec += 1000000000 + ts_ed.tv_nsec - ts_st.tv_nsec;
        } else {
                ts->tv_sec += ts_ed.tv_sec - ts_st.tv_sec;
                ts->tv_nsec += ts_ed.tv_nsec - ts_st.tv_nsec;
        }

        if (ts->tv_nsec >= 1000000000) {
                ts->tv_nsec = ts->tv_nsec - 1000000000;
                ts->tv_sec += 1;
        }

        return;
}

/*
 * Returns the '%' improvement of ts2 over ts1, i.e.
 * ((ts1 - ts2) / ts1) * 100. A negative value means ts2 took longer.
 */
int
comparetime (struct timespec ts1, struct timespec ts2)
{
        int64_t ts1_n, ts2_n;
        int pct = 0;

        ts1_n = (ts1.tv_sec * 1000000000LL) + ts1.tv_nsec;
        ts2_n = (ts2.tv_sec * 1000000000LL) + ts2.tv_nsec;

        pct = (int) (((ts1_n - ts2_n) * 100) / ts1_n);

        return pct;
}

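/*
 * Baseline path: emulate readdirplus by issuing a plain readdir and then a
 * separate lookup for every entry to obtain its handle.
 */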
int
old_readdir (glfs_t *fs)
{
        struct glfs_object     *root = NULL;
        struct glfs_fd         *fd = NULL;
        struct stat            *sb = NULL;
        char buf[512];
        struct dirent          *entry = NULL;
        int                     ret = -1;
        struct glfs_object     *glhandle = NULL;

        if (!fs)
                return -1;

        root = glfs_h_lookupat (fs, NULL, "/", sb, 0);
        VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_h_lookupat", !!root, ret, out);

        fd = glfs_opendir (fs, "/");
        VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_opendir", !!fd, ret, out);

        while (glfs_readdir_r (fd, (struct dirent *)buf, &entry), entry) {
                if (strcmp(entry->d_name, ".") && strcmp(entry->d_name, "..")) {
                        glhandle = glfs_h_lookupat (fs, root, entry->d_name, sb, 0);
                        VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_h_lookupat", !!glhandle, ret, out);
                }
        }

        ret  = 0;
out:
        if (fd)
                glfs_closedir (fd);

        return ret;
}

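/*
 * New path: glfs_xreaddirplus_r() returns the dirent together with its stat
 * and handle in a single call; each glfs_xreaddirp_stat result must be
 * released with glfs_free().
 */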
int
new_xreaddirplus (glfs_t *fs)
{
        struct glfs_fd         *fd = NULL;
        struct stat            *sb = NULL;
        int                     ret = -1;
        uint32_t                rflags = (GFAPI_XREADDIRP_STAT |
                                          GFAPI_XREADDIRP_HANDLE);
        struct glfs_xreaddirp_stat   *xstat = NULL;
        struct dirent           de;
        struct dirent          *pde = NULL;
        struct glfs_object     *glhandle = NULL;

        if (!fs)
                return -1;

        fd = glfs_opendir (fs, "/");
        VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_opendir", !!fd, ret, out);

        ret = glfs_xreaddirplus_r(fd, rflags, &xstat, &de, &pde);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("glfs_xreaddirplus_r", ret, out);

        while (ret > 0 && pde != NULL) {
                if (xstat) {
                        sb = glfs_xreaddirplus_get_stat (xstat);
                        VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_xreaddirplus_get_stat", !!sb, ret, out);

                        if (strcmp(de.d_name, ".") && strcmp(de.d_name, "..")) {
                                glhandle = glfs_xreaddirplus_get_object (xstat);
                                VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_xreaddirplus_get_object", !!glhandle, ret, out);
                        }
                }

                if (xstat) {
                        glfs_free(xstat);
                        xstat = NULL;
                }

                ret = glfs_xreaddirplus_r(fd, rflags, &xstat, &de, &pde);

                VALIDATE_AND_GOTO_LABEL_ON_ERROR ("glfs_xreaddirplus_r", ret, out);

        }

        ret  = 0;
out:
        if (xstat)
                glfs_free(xstat);
        if (fd)
                glfs_closedir (fd);

        return ret;
}

int
main (int argc, char *argv[])
{
        int                     ret = -1;
        glfs_t                 *fs = NULL;
        char                   *volname = NULL;
        char                   *logfile = NULL;
        char                   *hostname = NULL;
        char                   *my_file = "file_";
        char                    my_file_name[MAXPATHNAME];
        uint32_t                flags = O_RDWR|O_SYNC;
        struct glfs_fd         *fd = NULL;
        int                     i = 0;
        int                     pct = 0;
        struct timespec         timestamp = {0, 0}, st_timestamp, ed_timestamp;
        struct timespec         otimestamp = {0, 0}, ost_timestamp, oed_timestamp;

        if (argc != 4) {
                fprintf (stderr, "Usage: %s <hostname> <volname> <logfile>\n",
                         argv[0]);
                return 1;
        }

        hostname = argv[1];
        volname = argv[2];
        logfile = argv[3];

        fs = glfs_new (volname);
        VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_new", !!fs, ret, out);

        ret = glfs_set_volfile_server (fs, "tcp", hostname, 24007);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("glfs_set_volfile_server", ret, out);

        ret = glfs_set_logging (fs, logfile, 7);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("glfs_set_logging", ret, out);

        ret = glfs_init (fs);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("glfs_init", ret, out);

        for (i = 0; i < MAX_FILES_CREATE; i++) {
                sprintf (my_file_name, "%s%d", my_file, i);

                fd = glfs_creat(fs, my_file_name, flags, 0644);
                VALIDATE_BOOL_AND_GOTO_LABEL_ON_ERROR ("glfs_creat", !!fd, ret, out);

                glfs_close (fd);
        }

        /* Measure the time taken by the plain readdir path and by the
         * xreaddirplus path, then compare the two. */
        ret = clock_gettime (CLOCK_REALTIME, &ost_timestamp);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("clock_gettime", ret, out);

        ret = old_readdir (fs);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("old_readdir", ret, out);

        ret = clock_gettime (CLOCK_REALTIME, &oed_timestamp);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("clock_gettime", ret, out);

        assimilatetime (&otimestamp, ost_timestamp, oed_timestamp);

        printf ("\tOverall time using readdir:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n",
                otimestamp.tv_sec, otimestamp.tv_nsec);


        ret = clock_gettime (CLOCK_REALTIME, &st_timestamp);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("clock_gettime", ret, out);

        ret = new_xreaddirplus (fs);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("new_xreaddirplus", ret, out);

        ret = clock_gettime (CLOCK_REALTIME, &ed_timestamp);
        VALIDATE_AND_GOTO_LABEL_ON_ERROR ("clock_gettime", ret, out);

        assimilatetime (&timestamp, st_timestamp, ed_timestamp);

        printf ("\tOverall time using xreaddirplus:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n",
                timestamp.tv_sec, timestamp.tv_nsec);


        pct = comparetime (otimestamp, timestamp);
        printf ("There is improvement by %d%%\n", pct);

        ret = 0;
out:
        if (fs) {
                int fini_ret = glfs_fini (fs);

                if (fini_ret)
                        fprintf (stderr, "glfs_fini(fs) returned %d\n",
                                 fini_ret);
                /* do not let a successful fini mask an earlier failure */
                if (ret == 0)
                        ret = fini_ret;
        }

        return ret;
}
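/*
 * Typical invocation (a minimal sketch; the host, volume, and log names below
 * are placeholders, not part of this file):
 *
 *     ./glfs_xreaddirplus_r <hostname> <volname> <logfile>
 *     e.g. ./glfs_xreaddirplus_r localhost testvol /tmp/glfs_xreaddirplus_r.log
 *
 * The program creates MAX_FILES_CREATE files in the volume root, then times a
 * readdir+lookup pass against an xreaddirplus pass and prints the percentage
 * improvement.
 */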


+ }
+ }
+ return ret;
+}
+
+static int
+afr_remove_lock_from_saved_locks(afr_local_t *local, xlator_t *this)
+{
+ afr_private_t *priv = this->private;
+ struct gf_flock flock = local->cont.lk.user_flock;
+ afr_lk_heal_info_t *info = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int ret = -EINVAL;
+
+ fd_ctx = afr_fd_ctx_get(local->fd, this);
+ if (!fd_ctx || !fd_ctx->lk_heal_info) {
+ goto out;
+ }
+
+ info = fd_ctx->lk_heal_info;
+ if ((info->flock.l_start != flock.l_start) ||
+ (info->flock.l_whence != flock.l_whence) ||
+ (info->flock.l_len != flock.l_len)) {
+ /*TODO: Compare lkowners too.*/
+ goto out;
+ }
+
+ LOCK(&priv->lock);
+ {
+ list_del(&fd_ctx->lk_heal_info->pos);
+ }
+ UNLOCK(&priv->lock);
+
+ afr_lk_heal_info_cleanup(info);
+ fd_ctx->lk_heal_info = NULL;
+ ret = 0;
out:
- return ret;
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_LK_HEAL_DOM,
+ "%s: Failed to remove lock from healq",
+ uuid_utoa(local->fd->inode->gfid));
+ return ret;
+}
+
+int
+afr_lock_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
+{
+ afr_local_t *local = frame->local;
+ int i = (long)cookie;
+
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+ if (op_ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_LK_HEAL_DOM,
+ "Failed to heal lock on child %d for %s", i,
+ uuid_utoa(local->fd->inode->gfid));
+ }
+ syncbarrier_wake(&local->barrier);
+ return 0;
+}
+
+int
+afr_getlk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
+{
+ afr_local_t *local = frame->local;
+ int i = (long)cookie;
+
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+ if (op_ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_LK_HEAL_DOM,
+ "Failed getlk for %s", uuid_utoa(local->fd->inode->gfid));
+ } else {
+ local->cont.lk.getlk_rsp[i] = *lock;
+ }
+
+ syncbarrier_wake(&local->barrier);
+ return 0;
+}
+
+static gf_boolean_t
+afr_does_lk_owner_match(call_frame_t *frame, afr_private_t *priv,
+ afr_lk_heal_info_t *info)
+{
+ int i = 0;
+ afr_local_t *local = frame->local;
+ struct gf_flock flock = {
+ 0,
+ };
+ gf_boolean_t ret = _gf_true;
+ char *wind_on = alloca0(priv->child_count);
+ unsigned char *success_replies = alloca0(priv->child_count);
+ local->cont.lk.getlk_rsp = GF_CALLOC(sizeof(*local->cont.lk.getlk_rsp),
+ priv->child_count, gf_afr_mt_gf_lock);
+
+ flock = info->flock;
+ for (i = 0; i < priv->child_count; i++) {
+ if (info->locked_nodes[i])
+ wind_on[i] = 1;
+ }
+
+ AFR_ONLIST(wind_on, frame, afr_getlk_cbk, lk, info->fd, F_GETLK, &flock,
+ info->xdata_req);
+
+ afr_fill_success_replies(local, priv, success_replies);
+ if (AFR_COUNT(success_replies, priv->child_count) == 0) {
+ ret = _gf_false;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid || local->replies[i].op_ret != 0)
+ continue;
+ if (local->cont.lk.getlk_rsp[i].l_type == F_UNLCK)
+ continue;
+ /*TODO: Do we really need to compare lkowner if F_UNLCK is true?*/
+ if (!is_same_lkowner(&local->cont.lk.getlk_rsp[i].l_owner,
+ &info->lk_owner)) {
+ ret = _gf_false;
+ break;
+ }
+ }
+out:
+ afr_local_replies_wipe(local, priv);
+ GF_FREE(local->cont.lk.getlk_rsp);
+ local->cont.lk.getlk_rsp = NULL;
+ return ret;
+}
+
+static void
+afr_mark_fd_bad(fd_t *fd, xlator_t *this)
+{
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ if (!fd)
+ return;
+ LOCK(&fd->lock);
+ {
+ fd_ctx = __afr_fd_ctx_get(fd, this);
+ if (fd_ctx) {
+ fd_ctx->is_fd_bad = _gf_true;
+ fd_ctx->lk_heal_info = NULL;
+ }
+ }
+ UNLOCK(&fd->lock);
+}
+
+static void
+afr_add_lock_to_lkhealq(afr_private_t *priv, afr_lk_heal_info_t *info)
+{
+ LOCK(&priv->lock);
+ {
+ list_del(&info->pos);
+ list_add_tail(&info->pos, &priv->lk_healq);
+ }
+ UNLOCK(&priv->lock);
+}
+
+static void
+afr_lock_heal_do(call_frame_t *frame, afr_private_t *priv,
+ afr_lk_heal_info_t *info)
+{
+ int i = 0;
+ int op_errno = 0;
+ int32_t *current_event_gen = NULL;
+ afr_local_t *local = frame->local;
+ xlator_t *this = frame->this;
+ char *wind_on = alloca0(priv->child_count);
+ gf_boolean_t retry = _gf_true;
+
+ frame->root->pid = info->pid;
+ lk_owner_copy(&frame->root->lk_owner, &info->lk_owner);
+
+ op_errno = -afr_dom_lock_acquire(frame);
+ if ((op_errno != 0)) {
+ goto release;
+ }
+
+ if (!afr_does_lk_owner_match(frame, priv, info)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_LK_HEAL_DOM,
+ "Ignoring lock heal for %s since lk-onwers mismatch. "
+ "Lock possibly pre-empted by another client.",
+ uuid_utoa(info->fd->inode->gfid));
+ goto release;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (info->locked_nodes[i])
+ continue;
+ wind_on[i] = 1;
+ }
+
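+ /* Snapshot the child-up event generations before winding the lock so that
+ * connect/disconnect events racing with the heal can be detected when the
+ * replies are examined below. */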
+ current_event_gen = alloca(priv->child_count * sizeof(*current_event_gen));
+ memcpy(current_event_gen, info->child_up_event_gen,
+ priv->child_count * sizeof *current_event_gen);
+ AFR_ONLIST(wind_on, frame, afr_lock_heal_cbk, lk, info->fd, info->cmd,
+ &info->flock, info->xdata_req);
+
+ LOCK(&priv->lock);
+ {
+ for (i = 0; i < priv->child_count; i++) {
+ if (!wind_on[i])
+ continue;
+ if ((!local->replies[i].valid) || (local->replies[i].op_ret != 0)) {
+ continue;
+ }
+
+ if ((current_event_gen[i] == info->child_up_event_gen[i]) &&
+ (current_event_gen[i] > info->child_down_event_gen[i])) {
+ info->locked_nodes[i] = 1;
+ retry = _gf_false;
+ list_del_init(&info->pos);
+ list_add_tail(&info->pos, &priv->saved_locks);
+ } else {
+ /* We received subsequent child up/down events while the heal was
+ * in progress; don't mark this child as healed. Attempt the heal
+ * again on the next child-up event. */
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_LK_HEAL_DOM,
+ "Event gen mismatch: skipped healing lock on child %d "
+ "for %s.",
+ i, uuid_utoa(info->fd->inode->gfid));
+ }
+ }
+ }
+ UNLOCK(&priv->lock);
+
+release:
+ afr_dom_lock_release(frame);
+ if (retry)
+ afr_add_lock_to_lkhealq(priv, info);
+ return;
+}
+
+static int
+afr_lock_heal_done(int ret, call_frame_t *frame, void *opaque)
+{
+ STACK_DESTROY(frame->root);
+ return 0;
+}
+
+static int
+afr_lock_heal(void *opaque)
+{
+ call_frame_t *frame = (call_frame_t *)opaque;
+ call_frame_t *iter_frame = NULL;
+ xlator_t *this = frame->this;
+ afr_private_t *priv = this->private;
+ afr_lk_heal_info_t *info = NULL;
+ afr_lk_heal_info_t *tmp = NULL;
+ struct list_head healq = {
+ 0,
+ };
+ int ret = 0;
+
+ iter_frame = afr_copy_frame(frame);
+ if (!iter_frame) {
+ return ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&healq);
+ LOCK(&priv->lock);
+ {
+ list_splice_init(&priv->lk_healq, &healq);
+ }
+ UNLOCK(&priv->lock);
+
+ list_for_each_entry_safe(info, tmp, &healq, pos)
+ {
+ GF_ASSERT((AFR_COUNT(info->locked_nodes, priv->child_count) <
+ priv->child_count));
+ ((afr_local_t *)(iter_frame->local))->fd = fd_ref(info->fd);
+ afr_lock_heal_do(iter_frame, priv, info);
+ AFR_STACK_RESET(iter_frame);
+ if (iter_frame->local == NULL) {
+ ret = ENOTCONN;
+ gf_msg(frame->this->name, GF_LOG_ERROR, ENOTCONN,
+ AFR_MSG_LK_HEAL_DOM,
+ "Aborting processing of lk_healq."
+ "Healing will be reattempted on next child up for locks "
+ "that are still in quorum.");
+ LOCK(&priv->lock);
+ {
+ list_splice(&healq, &priv->lk_healq);
+ }
+ UNLOCK(&priv->lock);
+ break;
+ }
+ }
+
+ AFR_STACK_DESTROY(iter_frame);
+ return ret;
+}
+
+static int
+__afr_lock_heal_synctask(xlator_t *this, afr_private_t *priv, int child)
+{
+ int ret = 0;
+ call_frame_t *frame = NULL;
+ afr_lk_heal_info_t *info = NULL;
+ afr_lk_heal_info_t *tmp = NULL;
+
+ if (priv->shd.iamshd)
+ return 0;
+
+ list_for_each_entry_safe(info, tmp, &priv->saved_locks, pos)
+ {
+ info->child_up_event_gen[child] = priv->event_generation;
+ list_del_init(&info->pos);
+ list_add_tail(&info->pos, &priv->lk_healq);
+ }
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame)
+ return -1;
+
+ ret = synctask_new(this->ctx->env, afr_lock_heal, afr_lock_heal_done, frame,
+ frame);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_LK_HEAL_DOM,
+ "Failed to launch lock heal synctask");
+
+ return ret;
+}
+
+static int
+__afr_mark_pending_lk_heal(xlator_t *this, afr_private_t *priv, int child)
+{
+ afr_lk_heal_info_t *info = NULL;
+ afr_lk_heal_info_t *tmp = NULL;
+
+ if (priv->shd.iamshd)
+ return 0;
+ list_for_each_entry_safe(info, tmp, &priv->saved_locks, pos)
+ {
+ info->child_down_event_gen[child] = priv->event_generation;
+ if (info->locked_nodes[child] == 1)
+ info->locked_nodes[child] = 0;
+ if (!afr_has_quorum(info->locked_nodes, this, NULL)) {
+ /* Since the lock was lost on a quorum of nodes, we must not attempt
+ * to heal it anymore. Another client could have acquired the lock,
+ * modified the data and released it, and this client would not know
+ * about it if we healed the lock.*/
+ afr_mark_fd_bad(info->fd, this);
+ list_del(&info->pos);
+ afr_lk_heal_info_cleanup(info);
+ /* We're not winding an unlock on the node where the lock is still
+ * present because when fencing logic switches over to the new
+ * client (since we marked the fd bad), it should preempt any
+ * existing lock. */
+ }
+ }
+ return 0;
+}
+
+gf_boolean_t
+afr_is_consistent_io_possible(afr_local_t *local, afr_private_t *priv,
+ int32_t *op_errno)
+{
+ if (priv->consistent_io && local->call_count != priv->child_count) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, AFR_MSG_SUBVOLS_DOWN,
+ "All subvolumes are not up");
+ if (op_errno)
+ *op_errno = ENOTCONN;
+ return _gf_false;
+ }
+ return _gf_true;
}
+
+gf_boolean_t
+afr_is_lock_mode_mandatory(dict_t *xdata)
+{
+ int ret = 0;
+ uint32_t lk_mode = GF_LK_ADVISORY;
+
+ ret = dict_get_uint32(xdata, GF_LOCK_MODE, &lk_mode);
+ if (!ret && lk_mode == GF_LK_MANDATORY)
+ return _gf_true;
+
+ return _gf_false;
+}
+
+call_frame_t *
+afr_copy_frame(call_frame_t *base)
+{
+ afr_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ int op_errno = 0;
+
+ frame = copy_frame(base);
+ if (!frame)
+ return NULL;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local) {
+ AFR_STACK_DESTROY(frame);
+ return NULL;
+ }
+
+ return frame;
+}
+
+/* Check if an entry or inode could be undergoing a transaction. */
+gf_boolean_t
+afr_is_possibly_under_txn(afr_transaction_type type, afr_local_t *local,
+ xlator_t *this)
+{
+ int i = 0;
+ int tmp = 0;
+ afr_private_t *priv = NULL;
+ GF_UNUSED char *key = NULL;
+ int keylen = 0;
+
+ priv = this->private;
+
+ if (type == AFR_ENTRY_TRANSACTION) {
+ key = GLUSTERFS_PARENT_ENTRYLK;
+ keylen = SLEN(GLUSTERFS_PARENT_ENTRYLK);
+ } else if (type == AFR_DATA_TRANSACTION) {
+ /*FIXME: Use GLUSTERFS_INODELK_DOM_COUNT etc. once
+ * pl_inodelk_xattr_fill supports separate keys for different
+ * domains.*/
+ key = GLUSTERFS_INODELK_COUNT;
+ keylen = SLEN(GLUSTERFS_INODELK_COUNT);
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].xdata)
+ continue;
+ if (dict_get_int32n(local->replies[i].xdata, key, keylen, &tmp) == 0)
+ if (tmp)
+ return _gf_true;
+ }
+
+ return _gf_false;
+}
+
+static void
+afr_inode_ctx_destroy(afr_inode_ctx_t *ctx)
+{
+ int i = 0;
+
+ if (!ctx)
+ return;
+
+ for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++) {
+ GF_FREE(ctx->pre_op_done[i]);
+ }
+
+ GF_FREE(ctx);
+}
+
+int
+__afr_inode_ctx_get(xlator_t *this, inode_t *inode, afr_inode_ctx_t **ctx)
+{
+ uint64_t ctx_int = 0;
+ int ret = -1;
+ int i = -1;
+ int num_locks = -1;
+ afr_inode_ctx_t *ictx = NULL;
+ afr_lock_t *lock = NULL;
+ afr_private_t *priv = this->private;
+
+ ret = __inode_ctx_get(inode, this, &ctx_int);
+ if (ret == 0) {
+ *ctx = (afr_inode_ctx_t *)(uintptr_t)ctx_int;
+ return 0;
+ }
+
+ ictx = GF_CALLOC(1, sizeof(afr_inode_ctx_t), gf_afr_mt_inode_ctx_t);
+ if (!ictx)
+ goto out;
+
+ for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++) {
+ ictx->pre_op_done[i] = GF_CALLOC(sizeof *ictx->pre_op_done[i],
+ priv->child_count, gf_afr_mt_int32_t);
+ if (!ictx->pre_op_done[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ num_locks = sizeof(ictx->lock) / sizeof(afr_lock_t);
+ for (i = 0; i < num_locks; i++) {
+ lock = &ictx->lock[i];
+ INIT_LIST_HEAD(&lock->post_op);
+ INIT_LIST_HEAD(&lock->frozen);
+ INIT_LIST_HEAD(&lock->waiting);
+ INIT_LIST_HEAD(&lock->owners);
+ }
+
+ ctx_int = (uint64_t)(uintptr_t)ictx;
+ ret = __inode_ctx_set(inode, this, &ctx_int);
+ if (ret) {
+ goto out;
+ }
+
+ ictx->spb_choice = -1;
+ ictx->read_subvol = 0;
+ ictx->write_subvol = 0;
+ ictx->lock_count = 0;
+ ret = 0;
+ *ctx = ictx;
+out:
+ if (ret) {
+ afr_inode_ctx_destroy(ictx);
+ }
+ return ret;
+}
+
/*
* INODE CTX 64-bit VALUE FORMAT FOR SMALL (<= 16) SUBVOL COUNTS:
*
@@ -124,1535 +863,2258 @@ out:
*/
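/* A short sketch, derived from the masks used below: for <= 16 children the
 * ctx value packs metadatamap into bits 0-15, datamap into bits 16-31 and the
 * event generation into bits 32-63. For example, val = 0x0000000200030001
 * means event generation 2, datamap 0x3 (children 0 and 1 data-readable) and
 * metadatamap 0x1 (only child 0 metadata-readable). */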
int
-__afr_inode_read_subvol_get_small (inode_t *inode, xlator_t *this,
- unsigned char *data, unsigned char *metadata,
- int *event_p)
-{
- afr_private_t *priv = NULL;
- int ret = -1;
- uint16_t datamap = 0;
- uint16_t metadatamap = 0;
- uint32_t event = 0;
- uint64_t val = 0;
- int i = 0;
- afr_inode_ctx_t *ctx = NULL;
+__afr_set_in_flight_sb_status(xlator_t *this, afr_local_t *local,
+ inode_t *inode)
+{
+ int i = 0;
+ int txn_type = 0;
+ int count = 0;
+ int index = -1;
+ uint16_t datamap_old = 0;
+ uint16_t metadatamap_old = 0;
+ uint16_t datamap = 0;
+ uint16_t metadatamap = 0;
+ uint16_t tmp_map = 0;
+ uint16_t mask = 0;
+ uint32_t event = 0;
+ uint64_t val = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ txn_type = local->transaction.type;
+
+ if (txn_type == AFR_DATA_TRANSACTION)
+ val = local->inode_ctx->write_subvol;
+ else
+ val = local->inode_ctx->read_subvol;
+
+ metadatamap_old = metadatamap = (val & 0x000000000000ffff);
+ datamap_old = datamap = (val & 0x00000000ffff0000) >> 16;
+ event = (val & 0xffffffff00000000) >> 32;
+
+ if (txn_type == AFR_DATA_TRANSACTION)
+ tmp_map = datamap;
+ else if (txn_type == AFR_METADATA_TRANSACTION)
+ tmp_map = metadatamap;
+
+ count = gf_bits_count(tmp_map);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.failed_subvols[i])
+ continue;
+
+ mask = 1 << i;
+ if (txn_type == AFR_METADATA_TRANSACTION)
+ metadatamap &= ~mask;
+ else if (txn_type == AFR_DATA_TRANSACTION)
+ datamap &= ~mask;
+ }
+
+ switch (txn_type) {
+ case AFR_METADATA_TRANSACTION:
+ if ((metadatamap_old != 0) && (metadatamap == 0) && (count == 1)) {
+ index = gf_bits_index(tmp_map);
+ local->transaction.in_flight_sb_errno = local->replies[index]
+ .op_errno;
+ local->transaction.in_flight_sb = _gf_true;
+ metadatamap |= (1 << index);
+ }
+ if (metadatamap_old != metadatamap) {
+ __afr_inode_need_refresh_set(inode, this);
+ }
+ break;
+
+ case AFR_DATA_TRANSACTION:
+ if ((datamap_old != 0) && (datamap == 0) && (count == 1)) {
+ index = gf_bits_index(tmp_map);
+ local->transaction.in_flight_sb_errno = local->replies[index]
+ .op_errno;
+ local->transaction.in_flight_sb = _gf_true;
+ datamap |= (1 << index);
+ }
+ if (datamap_old != datamap)
+ __afr_inode_need_refresh_set(inode, this);
+ break;
- priv = this->private;
+ default:
+ break;
+ }
+
+ val = ((uint64_t)metadatamap) | (((uint64_t)datamap) << 16) |
+ (((uint64_t)event) << 32);
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret < 0)
- return ret;
+ if (txn_type == AFR_DATA_TRANSACTION)
+ local->inode_ctx->write_subvol = val;
+ local->inode_ctx->read_subvol = val;
- val = ctx->read_subvol;
+ return 0;
+}
- metadatamap = (val & 0x000000000000ffff);
- datamap = (val & 0x00000000ffff0000) >> 16;
- event = (val & 0xffffffff00000000) >> 32;
+gf_boolean_t
+afr_is_symmetric_error(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int op_errno = 0;
+ int i_errno = 0;
+ gf_boolean_t matching_errors = _gf_true;
+ int i = 0;
- for (i = 0; i < priv->child_count; i++) {
- if (metadata)
- metadata[i] = (metadatamap >> i) & 1;
- if (data)
- data[i] = (datamap >> i) & 1;
- }
+ priv = this->private;
+ local = frame->local;
- if (event_p)
- *event_p = event;
- return ret;
-}
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret != -1) {
+ /* Operation succeeded on at least one subvol,
+ so it is not a failed-everywhere situation.
+ */
+ matching_errors = _gf_false;
+ break;
+ }
+ i_errno = local->replies[i].op_errno;
+
+ if (i_errno == ENOTCONN) {
+ /* ENOTCONN is not a symmetric error. We do not
+ know if the operation was performed on the
+ backend or not.
+ */
+ matching_errors = _gf_false;
+ break;
+ }
+
+ if (!op_errno) {
+ op_errno = i_errno;
+ } else if (op_errno != i_errno) {
+ /* Mismatching op_errno's */
+ matching_errors = _gf_false;
+ break;
+ }
+ }
+ return matching_errors;
+}
int
-__afr_inode_read_subvol_set_small (inode_t *inode, xlator_t *this,
- unsigned char *data, unsigned char *metadata,
- int event)
+afr_set_in_flight_sb_status(xlator_t *this, call_frame_t *frame, inode_t *inode)
{
- afr_private_t *priv = NULL;
- uint16_t datamap = 0;
- uint16_t metadatamap = 0;
- uint64_t val = 0;
- int i = 0;
- int ret = -1;
- afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
+ local = frame->local;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret)
- goto out;
+ /* If this transaction saw no failures, then exit. */
+ if (AFR_COUNT(local->transaction.failed_subvols, priv->child_count) == 0)
+ return 0;
- for (i = 0; i < priv->child_count; i++) {
- if (data[i])
- datamap |= (1 << i);
- if (metadata[i])
- metadatamap |= (1 << i);
- }
+ if (afr_is_symmetric_error(frame, this))
+ return 0;
- val = ((uint64_t) metadatamap) |
- (((uint64_t) datamap) << 16) |
- (((uint64_t) event) << 32);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_set_in_flight_sb_status(this, local, inode);
+ }
+ UNLOCK(&inode->lock);
- ctx->read_subvol = val;
+ return ret;
+}
- ret = 0;
-out:
+int
+__afr_inode_read_subvol_get_small(inode_t *inode, xlator_t *this,
+ unsigned char *data, unsigned char *metadata,
+ int *event_p)
+{
+ afr_private_t *priv = NULL;
+ int ret = -1;
+ uint16_t datamap = 0;
+ uint16_t metadatamap = 0;
+ uint32_t event = 0;
+ uint64_t val = 0;
+ int i = 0;
+ afr_inode_ctx_t *ctx = NULL;
+
+ priv = this->private;
+
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret < 0)
return ret;
+
+ val = ctx->read_subvol;
+
+ metadatamap = (val & 0x000000000000ffff);
+ datamap = (val & 0x00000000ffff0000) >> 16;
+ event = (val & 0xffffffff00000000) >> 32;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (metadata)
+ metadata[i] = (metadatamap >> i) & 1;
+ if (data)
+ data[i] = (datamap >> i) & 1;
+ }
+
+ if (event_p)
+ *event_p = event;
+ return ret;
}
int
-__afr_inode_read_subvol_reset_small (inode_t *inode, xlator_t *this)
+__afr_inode_read_subvol_set_small(inode_t *inode, xlator_t *this,
+ unsigned char *data, unsigned char *metadata,
+ int event)
{
- int ret = -1;
- uint16_t datamap = 0;
- uint16_t metadatamap = 0;
- uint32_t event = 0;
- uint64_t val = 0;
- afr_inode_ctx_t *ctx = NULL;
+ afr_private_t *priv = NULL;
+ uint16_t datamap = 0;
+ uint16_t metadatamap = 0;
+ uint64_t val = 0;
+ int i = 0;
+ int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret)
- return ret;
+ priv = this->private;
- val = ctx->read_subvol;
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret)
+ goto out;
- metadatamap = (val & 0x000000000000ffff) >> 0;
- datamap = (val & 0x00000000ffff0000) >> 16;
- event = 0;
+ for (i = 0; i < priv->child_count; i++) {
+ if (data[i])
+ datamap |= (1 << i);
+ if (metadata[i])
+ metadatamap |= (1 << i);
+ }
- val = ((uint64_t) metadatamap) |
- (((uint64_t) datamap) << 16) |
- (((uint64_t) event) << 32);
+ val = ((uint64_t)metadatamap) | (((uint64_t)datamap) << 16) |
+ (((uint64_t)event) << 32);
- ctx->read_subvol = val;
+ ctx->read_subvol = val;
- return ret;
+ ret = 0;
+out:
+ return ret;
}
-
int
-__afr_inode_read_subvol_get (inode_t *inode, xlator_t *this,
- unsigned char *data, unsigned char *metadata,
- int *event_p)
+__afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int *event_p)
{
- afr_private_t *priv = NULL;
- int ret = -1;
+ afr_private_t *priv = NULL;
+ int ret = -1;
- priv = this->private;
+ priv = this->private;
- if (priv->child_count <= 16)
- ret = __afr_inode_read_subvol_get_small (inode, this, data,
- metadata, event_p);
- else
- /* TBD: allocate structure with array and read from it */
- ret = -1;
+ if (priv->child_count <= 16)
+ ret = __afr_inode_read_subvol_get_small(inode, this, data, metadata,
+ event_p);
+ else
+ /* TBD: allocate structure with array and read from it */
+ ret = -1;
- return ret;
+ return ret;
}
int
-__afr_inode_split_brain_choice_get (inode_t *inode, xlator_t *this,
- int *spb_choice)
+__afr_inode_split_brain_choice_get(inode_t *inode, xlator_t *this,
+ int *spb_choice)
{
- afr_inode_ctx_t *ctx = NULL;
- int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret < 0)
- return ret;
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret < 0)
+ return ret;
- *spb_choice = ctx->spb_choice;
- return 0;
+ *spb_choice = ctx->spb_choice;
+ return 0;
}
int
-__afr_inode_read_subvol_set (inode_t *inode, xlator_t *this, unsigned char *data,
- unsigned char *metadata, int event)
+__afr_inode_read_subvol_set(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int event)
{
- afr_private_t *priv = NULL;
- int ret = -1;
+ afr_private_t *priv = NULL;
+ int ret = -1;
- priv = this->private;
+ priv = this->private;
- if (priv->child_count <= 16)
- ret = __afr_inode_read_subvol_set_small (inode, this, data,
- metadata, event);
- else
- ret = -1;
+ if (priv->child_count <= 16)
+ ret = __afr_inode_read_subvol_set_small(inode, this, data, metadata,
+ event);
+ else
+ ret = -1;
- return ret;
+ return ret;
}
int
-__afr_inode_split_brain_choice_set (inode_t *inode, xlator_t *this,
- int spb_choice)
+__afr_inode_split_brain_choice_set(inode_t *inode, xlator_t *this,
+ int spb_choice)
{
- afr_inode_ctx_t *ctx = NULL;
- int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret)
- goto out;
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret)
+ goto out;
- ctx->spb_choice = spb_choice;
+ ctx->spb_choice = spb_choice;
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
int
-__afr_inode_read_subvol_reset (inode_t *inode, xlator_t *this)
+afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int *event_p)
{
- afr_private_t *priv = NULL;
- int ret = -1;
-
- priv = this->private;
+ int ret = -1;
- if (priv->child_count <= 16)
- ret = __afr_inode_read_subvol_reset_small (inode, this);
- else
- ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- return ret;
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_read_subvol_get(inode, this, data, metadata, event_p);
+ }
+ UNLOCK(&inode->lock);
+out:
+ return ret;
}
-
int
-afr_inode_read_subvol_get (inode_t *inode, xlator_t *this, unsigned char *data,
- unsigned char *metadata, int *event_p)
+afr_inode_get_readable(call_frame_t *frame, inode_t *inode, xlator_t *this,
+ unsigned char *readable, int *event_p, int type)
{
- int ret = -1;
+ afr_private_t *priv = this->private;
+ afr_local_t *local = frame->local;
+ unsigned char *data = alloca0(priv->child_count);
+ unsigned char *metadata = alloca0(priv->child_count);
+ int data_count = 0;
+ int metadata_count = 0;
+ int event_generation = 0;
+ int ret = 0;
+
+ ret = afr_inode_read_subvol_get(inode, this, data, metadata,
+ &event_generation);
+ if (ret == -1)
+ return -EIO;
+
+ data_count = AFR_COUNT(data, priv->child_count);
+ metadata_count = AFR_COUNT(metadata, priv->child_count);
+
+ if (inode->ia_type == IA_IFDIR) {
+ /* For directories, allow even if it is in data split-brain. */
+ if (type == AFR_METADATA_TRANSACTION || local->op == GF_FOP_STAT ||
+ local->op == GF_FOP_FSTAT) {
+ if (!metadata_count)
+ return -EIO;
+ }
+ } else {
+ /* For files, abort in case of data/metadata split-brain. */
+ if (!data_count || !metadata_count) {
+ return -EIO;
+ }
+ }
+
+ if (type == AFR_METADATA_TRANSACTION && readable)
+ memcpy(readable, metadata, priv->child_count * sizeof *metadata);
+ if (type == AFR_DATA_TRANSACTION && readable) {
+ if (!data_count)
+ memcpy(readable, local->child_up,
+ priv->child_count * sizeof *readable);
+ else
+ memcpy(readable, data, priv->child_count * sizeof *data);
+ }
+ if (event_p)
+ *event_p = event_generation;
+ return 0;
+}
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+static int
+afr_inode_split_brain_choice_get(inode_t *inode, xlator_t *this,
+ int *spb_choice)
+{
+ int ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_read_subvol_get (inode, this, data,
- metadata, event_p);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_split_brain_choice_get(inode, this, spb_choice);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
+/*
+ * frame is used to get the favourite-child policy. Since
+ * afr_inode_split_brain_choice_get can be called from afr_open, it is
+ * possible to have a frame without local->replies. In that case frame is
+ * passed as NULL, so this function must handle the NULL frame case.
+ */
int
-afr_inode_get_readable (call_frame_t *frame, inode_t *inode, xlator_t *this,
- unsigned char *readable, int *event_p, int type)
+afr_split_brain_read_subvol_get(inode_t *inode, xlator_t *this,
+ call_frame_t *frame, int *spb_subvol)
{
+ int ret = -1;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- afr_private_t *priv = this->private;
- afr_local_t *local = frame->local;
- unsigned char *data = alloca0 (priv->child_count);
- unsigned char *metadata = alloca0 (priv->child_count);
- int data_count = 0;
- int metadata_count = 0;
- int event_generation = 0;
- int ret = 0;
+ GF_VALIDATE_OR_GOTO("afr", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, spb_subvol, out);
- /* We don't care about split-brains for entry transactions. */
- if (type == AFR_ENTRY_TRANSACTION || type == AFR_ENTRY_RENAME_TRANSACTION)
- return 0;
+ priv = this->private;
- ret = afr_inode_read_subvol_get (inode, this, data, metadata,
- &event_generation);
- if (ret == -1)
- return -EIO;
-
- data_count = AFR_COUNT (data, priv->child_count);
- metadata_count = AFR_COUNT (metadata, priv->child_count);
+ ret = afr_inode_split_brain_choice_get(inode, this, spb_subvol);
+ if (*spb_subvol < 0 && priv->fav_child_policy && frame && frame->local) {
+ local = frame->local;
+ *spb_subvol = afr_sh_get_fav_by_policy(this, local->replies, inode,
+ NULL);
+ if (*spb_subvol >= 0) {
+ ret = 0;
+ }
+ }
- if (inode->ia_type == IA_IFDIR) {
- /* For directories, allow even if it is in data split-brain. */
- if (type == AFR_METADATA_TRANSACTION) {
- if (!metadata_count)
- return -EIO;
- }
- } else {
- /* For files, abort in case of data/metadata split-brain. */
- if (!data_count || !metadata_count)
- return -EIO;
- }
-
- if (type == AFR_METADATA_TRANSACTION && readable)
- memcpy (readable, metadata, priv->child_count * sizeof *metadata);
- if (type == AFR_DATA_TRANSACTION && readable) {
- if (!data_count)
- memcpy (readable, local->child_up,
- priv->child_count * sizeof *readable);
- else
- memcpy (readable, data, priv->child_count * sizeof *data);
- }
- if (event_p)
- *event_p = event_generation;
- return 0;
+out:
+ return ret;
}
-
int
-afr_inode_split_brain_choice_get (inode_t *inode, xlator_t *this,
- int *spb_choice)
+afr_inode_read_subvol_set(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int event)
{
- int ret = -1;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_split_brain_choice_get (inode, this,
- spb_choice);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_read_subvol_set(inode, this, data, metadata, event);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
-
int
-afr_inode_read_subvol_set (inode_t *inode, xlator_t *this, unsigned char *data,
- unsigned char *metadata, int event)
+afr_inode_split_brain_choice_set(inode_t *inode, xlator_t *this, int spb_choice)
{
- int ret = -1;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_read_subvol_set (inode, this, data, metadata,
- event);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_split_brain_choice_set(inode, this, spb_choice);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
-
-int
-afr_inode_split_brain_choice_set (inode_t *inode, xlator_t *this,
- int spb_choice)
+/* The caller of this should perform afr_inode_refresh, if this function
+ * returns _gf_true
+ */
+gf_boolean_t
+afr_is_inode_refresh_reqd(inode_t *inode, xlator_t *this, int event_gen1,
+ int event_gen2)
{
- int ret = -1;
+ gf_boolean_t need_refresh = _gf_false;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_split_brain_choice_set (inode, this,
- spb_choice);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret)
+ goto unlock;
+
+ need_refresh = ctx->need_refresh;
+ /* The caller is expected to perform an inode refresh after this call,
+ * hence reset need_refresh to false here. */
+ ctx->need_refresh = _gf_false;
+ }
+unlock:
+ UNLOCK(&inode->lock);
+
+ if (event_gen1 != event_gen2)
+ need_refresh = _gf_true;
out:
- return ret;
+ return need_refresh;
}
+int
+__afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
+{
+ int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret == 0) {
+ ctx->need_refresh = _gf_true;
+ }
+
+ return ret;
+}
int
-afr_inode_read_subvol_reset (inode_t *inode, xlator_t *this)
+afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
{
- int ret = -1;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_read_subvol_reset (inode, this);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_need_refresh_set(inode, this);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
int
-afr_spb_choice_timeout_cancel (xlator_t *this, inode_t *inode)
+afr_spb_choice_timeout_cancel(xlator_t *this, inode_t *inode)
{
- afr_inode_ctx_t *ctx = NULL;
- int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- if (!inode)
- return ret;
+ if (!inode)
+ return ret;
- LOCK(&inode->lock);
- {
- __afr_inode_ctx_get (this, inode, &ctx);
- if (!ctx) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to cancel"
- " split-brain choice timer.");
- goto out;
- }
- ctx->spb_choice = -1;
- if (ctx->timer) {
- gf_timer_call_cancel (this->ctx, ctx->timer);
- ctx->timer = NULL;
- }
- ret = 0;
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret < 0 || !ctx) {
+ UNLOCK(&inode->lock);
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
+ "Failed to cancel split-brain choice timer.");
+ goto out;
+ }
+ ctx->spb_choice = -1;
+ if (ctx->timer) {
+ gf_timer_call_cancel(this->ctx, ctx->timer);
+ ctx->timer = NULL;
}
+ ret = 0;
+ }
+ UNLOCK(&inode->lock);
out:
- UNLOCK(&inode->lock);
- return ret;
+ return ret;
}
void
-afr_set_split_brain_choice_cbk (void *data)
+afr_set_split_brain_choice_cbk(void *data)
{
- inode_t *inode = data;
- xlator_t *this = THIS;
+ inode_t *inode = data;
+ xlator_t *this = THIS;
- afr_spb_choice_timeout_cancel (this, inode);
- inode_unref (inode);
- return;
+ afr_spb_choice_timeout_cancel(this, inode);
+ inode_invalidate(inode);
+ inode_unref(inode);
+ return;
}
-
int
-afr_set_split_brain_choice (int ret, call_frame_t *frame, void *opaque)
-{
- int op_errno = ENOMEM;
- afr_private_t *priv = NULL;
- afr_inode_ctx_t *ctx = NULL;
- inode_t *inode = NULL;
- loc_t *loc = NULL;
- xlator_t *this = NULL;
- afr_spbc_timeout_t *data = opaque;
- struct timespec delta = {0, };
-
- if (ret)
- goto out;
-
- frame = data->frame;
- loc = data->loc;
- this = frame->this;
- priv = this->private;
-
- delta.tv_sec = priv->spb_choice_timeout;
- delta.tv_nsec = 0;
-
- inode = loc->inode;
- if (!inode)
- goto out;
-
- if (!(data->d_spb || data->m_spb)) {
- gf_log (this->name, GF_LOG_WARNING, "Cannot set "
- "replica.split-brain-choice on %s. File is"
- " not in data/metadata split-brain.",
- uuid_utoa (loc->gfid));
- ret = -1;
- op_errno = EINVAL;
- goto out;
+afr_set_split_brain_choice(int ret, call_frame_t *frame, void *opaque)
+{
+ int op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ afr_inode_ctx_t *ctx = NULL;
+ inode_t *inode = NULL;
+ loc_t *loc = NULL;
+ xlator_t *this = NULL;
+ afr_spbc_timeout_t *data = opaque;
+ struct timespec delta = {
+ 0,
+ };
+ gf_boolean_t timer_set = _gf_false;
+ gf_boolean_t timer_cancelled = _gf_false;
+ gf_boolean_t timer_reset = _gf_false;
+ int old_spb_choice = -1;
+
+ frame = data->frame;
+ loc = data->loc;
+ this = frame->this;
+ priv = this->private;
+
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+
+ delta.tv_sec = priv->spb_choice_timeout;
+ delta.tv_nsec = 0;
+
+ if (!loc->inode) {
+ ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ if (!(data->d_spb || data->m_spb)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
+ "Cannot set "
+ "replica.split-brain-choice on %s. File is"
+ " not in data/metadata split-brain.",
+ uuid_utoa(loc->gfid));
+ ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ /*
+ * we're ref'ing the inode before LOCK like it is done elsewhere in the
+ * code. If we ref after LOCK, coverity complains of possible deadlocks.
+ */
+ inode = inode_ref(loc->inode);
+
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret) {
+ UNLOCK(&inode->lock);
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
+ "Failed to get inode_ctx for %s", loc->name);
+ goto post_unlock;
}
- LOCK(&inode->lock);
- {
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get"
- "inode_ctx for %s", loc->name);
- goto unlock;
- }
+ old_spb_choice = ctx->spb_choice;
+ ctx->spb_choice = data->spb_child_index;
- ctx->spb_choice = data->spb_child_index;
-
- /* Possible changes in spb-choice :
- * -1 to valid : ref and inject timer
- *
- * valid to valid : cancel timer and inject new one
- *
- * valid to -1 : cancel timer and unref
- *
- * -1 to -1 : do not do anything
- */
+ /* Possible changes in spb-choice :
+ * valid to -1 : cancel timer and unref
+ * valid to valid : cancel timer and inject new one
+ * -1 to -1 : unref and do not do anything
+ * -1 to valid : inject timer
+ */
- /* ctx->timer is NULL iff previous value of
- * ctx->spb_choice is -1
- */
- if (ctx->timer) {
- if (ctx->spb_choice == -1) {
- gf_timer_call_cancel (this->ctx, ctx->timer);
- ctx->timer = NULL;
- inode_unref (inode);
- goto unlock;
- }
- goto reset_timer;
- } else {
- if (ctx->spb_choice == -1)
- goto unlock;
+ /* ctx->timer is NULL iff previous value of
+ * ctx->spb_choice is -1
+ */
+ if (ctx->timer) {
+ if (ctx->spb_choice == -1) {
+ if (!gf_timer_call_cancel(this->ctx, ctx->timer)) {
+ ctx->timer = NULL;
+ timer_cancelled = _gf_true;
}
-
- inode = inode_ref (loc->inode);
- goto set_timer;
-
-reset_timer:
- gf_timer_call_cancel (this->ctx, ctx->timer);
- ctx->timer = NULL;
-
-set_timer:
- ctx->timer = gf_timer_call_after (this->ctx, delta,
- afr_set_split_brain_choice_cbk,
- inode);
+ /* If timer cancel failed here it means that the
+ * previous cbk will be executed which will set
+ * spb_choice to -1. So we can consider the
+ * 'valid to -1' case to be a success
+ * (i.e. ret = 0) and goto unlock.
+ */
+ goto unlock;
+ }
+ goto reset_timer;
+ } else {
+ if (ctx->spb_choice == -1)
+ goto unlock;
+ goto set_timer;
}
+
+ reset_timer:
+ ret = gf_timer_call_cancel(this->ctx, ctx->timer);
+ if (ret != 0) {
+ /* We need to bail out now instead of launching a new
+ * timer. Otherwise the cbk of the previous timer event
+ * will cancel the new ctx->timer.
+ */
+ ctx->spb_choice = old_spb_choice;
+ ret = -1;
+ op_errno = EAGAIN;
+ goto unlock;
+ }
+ ctx->timer = NULL;
+ timer_reset = _gf_true;
+
+ set_timer:
+ ctx->timer = gf_timer_call_after(this->ctx, delta,
+ afr_set_split_brain_choice_cbk, inode);
+ if (!ctx->timer) {
+ ctx->spb_choice = old_spb_choice;
+ ret = -1;
+ op_errno = ENOMEM;
+ }
+ if (!timer_reset && ctx->timer)
+ timer_set = _gf_true;
+ if (timer_reset && !ctx->timer)
+ timer_cancelled = _gf_true;
+ }
unlock:
- UNLOCK(&inode->lock);
- inode_invalidate (inode);
+ UNLOCK(&inode->lock);
+post_unlock:
+ if (!timer_set)
+ inode_unref(inode);
+ if (timer_cancelled)
+ inode_unref(inode);
+ /*
+ * We need to invalidate the inode to prevent the kernel from serving
+ * reads from an older cached value despite a change in spb_choice to
+ * a new value.
+ */
+ inode_invalidate(inode);
out:
- if (data)
- GF_FREE (data);
- AFR_STACK_UNWIND (setxattr, frame, ret, op_errno, NULL);
- return 0;
+ GF_FREE(data);
+ AFR_STACK_UNWIND(setxattr, frame, ret, op_errno, NULL);
+ return 0;
}
int
-afr_accused_fill (xlator_t *this, dict_t *xdata, unsigned char *accused,
- afr_transaction_type type)
+afr_accused_fill(xlator_t *this, dict_t *xdata, unsigned char *accused,
+ afr_transaction_type type)
{
- afr_private_t *priv = NULL;
- int i = 0;
- int idx = afr_index_for_transaction_type (type);
- void *pending_raw = NULL;
- int pending[3];
- int ret = 0;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int idx = afr_index_for_transaction_type(type);
+ void *pending_raw = NULL;
+ int pending[3];
+ int ret = 0;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- ret = dict_get_ptr (xdata, priv->pending_key[i],
- &pending_raw);
- if (ret) /* no pending flags */
- continue;
- memcpy (pending, pending_raw, sizeof(pending));
+ for (i = 0; i < priv->child_count; i++) {
+ ret = dict_get_ptr(xdata, priv->pending_key[i], &pending_raw);
+ if (ret) /* no pending flags */
+ continue;
+ memcpy(pending, pending_raw, sizeof(pending));
- if (ntoh32 (pending[idx]))
- accused[i] = 1;
- }
+ if (ntoh32(pending[idx]))
+ accused[i] = 1;
+ }
- return 0;
+ return 0;
}
-
int
-afr_accuse_smallfiles (xlator_t *this, struct afr_reply *replies,
- unsigned char *data_accused)
+afr_accuse_smallfiles(xlator_t *this, struct afr_reply *replies,
+ unsigned char *data_accused)
{
- int i = 0;
- afr_private_t *priv = NULL;
- uint64_t maxsize = 0;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ uint64_t maxsize = 0;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (data_accused[i])
- continue;
- if (replies[i].poststat.ia_size > maxsize)
- maxsize = replies[i].poststat.ia_size;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid && replies[i].xdata &&
+ dict_get_sizen(replies[i].xdata, GLUSTERFS_BAD_INODE))
+ continue;
+ if (data_accused[i])
+ continue;
+ if (replies[i].poststat.ia_size > maxsize)
+ maxsize = replies[i].poststat.ia_size;
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (data_accused[i])
- continue;
- if (AFR_IS_ARBITER_BRICK(priv, i))
- continue;
- if (replies[i].poststat.ia_size < maxsize)
- data_accused[i] = 1;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (data_accused[i])
+ continue;
+ if (AFR_IS_ARBITER_BRICK(priv, i))
+ continue;
+ if (replies[i].poststat.ia_size < maxsize)
+ data_accused[i] = 1;
+ }
- return 0;
+ return 0;
}
-
int
-afr_replies_interpret (call_frame_t *frame, xlator_t *this, inode_t *inode)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- int event_generation = 0;
- int i = 0;
- unsigned char *data_accused = NULL;
- unsigned char *metadata_accused = NULL;
- unsigned char *data_readable = NULL;
- unsigned char *metadata_readable = NULL;
- int ret = 0;
-
- local = frame->local;
- priv = this->private;
- replies = local->replies;
- event_generation = local->event_generation;
-
- data_accused = alloca0 (priv->child_count);
- data_readable = alloca0 (priv->child_count);
- metadata_accused = alloca0 (priv->child_count);
- metadata_readable = alloca0 (priv->child_count);
-
- for (i = 0; i < priv->child_count; i++) {
- data_readable[i] = 1;
- metadata_readable[i] = 1;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid) {
- data_readable[i] = 0;
- metadata_readable[i] = 0;
- continue;
- }
-
- if (replies[i].op_ret == -1) {
- data_readable[i] = 0;
- metadata_readable[i] = 0;
- continue;
- }
-
- afr_accused_fill (this, replies[i].xdata, data_accused,
- (replies[i].poststat.ia_type == IA_IFDIR) ?
- AFR_ENTRY_TRANSACTION : AFR_DATA_TRANSACTION);
-
- afr_accused_fill (this, replies[i].xdata,
- metadata_accused, AFR_METADATA_TRANSACTION);
-
- }
-
- if (inode->ia_type != IA_IFDIR)
- afr_accuse_smallfiles (this, replies, data_accused);
-
- for (i = 0; i < priv->child_count; i++) {
- if (data_accused[i]) {
- data_readable[i] = 0;
- ret = 1;
- }
- if (metadata_accused[i]) {
- metadata_readable[i] = 0;
- ret = 1;
- }
- }
-
- afr_inode_read_subvol_set (inode, this, data_readable,
- metadata_readable, event_generation);
- return ret;
+afr_readables_fill(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ unsigned char *data_accused, unsigned char *metadata_accused,
+ unsigned char *data_readable,
+ unsigned char *metadata_readable, struct afr_reply *replies)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xdata = NULL;
+ int i = 0;
+ int ret = 0;
+ ia_type_t ia_type = IA_INVAL;
+
+ local = frame->local;
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ data_readable[i] = 1;
+ metadata_readable[i] = 1;
+ }
+ if (AFR_IS_ARBITER_BRICK(priv, ARBITER_BRICK_INDEX)) {
+ data_readable[ARBITER_BRICK_INDEX] = 0;
+ metadata_readable[ARBITER_BRICK_INDEX] = 0;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies) { /* Lookup */
+ if (!replies[i].valid || replies[i].op_ret == -1 ||
+ (replies[i].xdata &&
+ dict_get_sizen(replies[i].xdata, GLUSTERFS_BAD_INODE))) {
+ data_readable[i] = 0;
+ metadata_readable[i] = 0;
+ continue;
+ }
+
+ xdata = replies[i].xdata;
+ ia_type = replies[i].poststat.ia_type;
+ } else { /* pre-op xattrop */
+ xdata = local->transaction.changelog_xdata[i];
+ ia_type = inode->ia_type;
+ }
+
+ if (!xdata)
+ continue; /* mkdir_cbk sends NULL xdata_rsp. */
+ afr_accused_fill(this, xdata, data_accused,
+ (ia_type == IA_IFDIR) ? AFR_ENTRY_TRANSACTION
+ : AFR_DATA_TRANSACTION);
+
+ afr_accused_fill(this, xdata, metadata_accused,
+ AFR_METADATA_TRANSACTION);
+ }
+
+ if (replies && ia_type != IA_INVAL && ia_type != IA_IFDIR &&
+ /* We want to accuse small files only when we know for
+ * sure that there is no IO happening. Otherwise, the
+ * ia_sizes obtained in post-refresh replies may
+ * mismatch due to a race between inode-refresh and
+ * ongoing writes, causing spurious heal launches*/
+ !afr_is_possibly_under_txn(AFR_DATA_TRANSACTION, local, this)) {
+ afr_accuse_smallfiles(this, replies, data_accused);
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (data_accused[i]) {
+ data_readable[i] = 0;
+ ret = 1;
+ }
+ if (metadata_accused[i]) {
+ metadata_readable[i] = 0;
+ ret = 1;
+ }
+ }
+ return ret;
}
-
+int
+afr_replies_interpret(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ gf_boolean_t *start_heal)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ int event_generation = 0;
+ int i = 0;
+ unsigned char *data_accused = NULL;
+ unsigned char *metadata_accused = NULL;
+ unsigned char *data_readable = NULL;
+ unsigned char *metadata_readable = NULL;
+ int ret = 0;
+
+ local = frame->local;
+ priv = this->private;
+ replies = local->replies;
+ event_generation = local->event_generation;
+
+ data_accused = alloca0(priv->child_count);
+ data_readable = alloca0(priv->child_count);
+ metadata_accused = alloca0(priv->child_count);
+ metadata_readable = alloca0(priv->child_count);
+
+ ret = afr_readables_fill(frame, this, inode, data_accused, metadata_accused,
+ data_readable, metadata_readable, replies);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (start_heal && priv->child_up[i] &&
+ (data_accused[i] || metadata_accused[i])) {
+ *start_heal = _gf_true;
+ break;
+ }
+ }
+ afr_inode_read_subvol_set(inode, this, data_readable, metadata_readable,
+ event_generation);
+ return ret;
+}
int
-afr_refresh_selfheal_done (int ret, call_frame_t *heal, void *opaque)
+afr_refresh_selfheal_done(int ret, call_frame_t *heal, void *opaque)
{
- if (heal)
- STACK_DESTROY (heal->root);
- return 0;
+ if (heal)
+ AFR_STACK_DESTROY(heal);
+ return 0;
}
int
-afr_inode_refresh_err (call_frame_t *frame, xlator_t *this)
+afr_inode_refresh_err(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- int err = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int err = 0;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (local->replies[i].valid && !local->replies[i].op_ret) {
- err = 0;
- goto ret;
- }
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].valid && !local->replies[i].op_ret) {
+ err = 0;
+ goto ret;
+ }
+ }
- err = afr_final_errno (local, priv);
+ err = afr_final_errno(local, priv);
ret:
- return -err;
+ return err;
}
-
-int
-afr_refresh_selfheal_wrap (void *opaque)
+gf_boolean_t
+afr_selfheal_enabled(const xlator_t *this)
{
- call_frame_t *frame = opaque;
- afr_local_t *local = NULL;
- xlator_t *this = NULL;
- int err = 0;
+ const afr_private_t *priv = this->private;
- local = frame->local;
- this = frame->this;
+ return priv->data_self_heal || priv->metadata_self_heal ||
+ priv->entry_self_heal;
+}
- afr_selfheal (frame->this, local->refreshinode->gfid);
+int
+afr_txn_refresh_done(call_frame_t *frame, xlator_t *this, int err)
+{
+ call_frame_t *heal_frame = NULL;
+ afr_local_t *heal_local = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ inode_t *inode = NULL;
+ int event_generation = 0;
+ int read_subvol = -1;
+ int ret = 0;
+
+ local = frame->local;
+ inode = local->inode;
+ priv = this->private;
+
+ if (err)
+ goto refresh_done;
+
+ if (local->op == GF_FOP_LOOKUP)
+ goto refresh_done;
+
+ ret = afr_inode_get_readable(frame, inode, this, local->readable,
+ &event_generation, local->transaction.type);
+
+ if (ret == -EIO) {
+ /* No readable subvolume even after refresh ==> splitbrain.*/
+ if (!priv->fav_child_policy) {
+ err = EIO;
+ goto refresh_done;
+ }
+ read_subvol = afr_sh_get_fav_by_policy(this, local->replies, inode,
+ NULL);
+ if (read_subvol == -1) {
+ err = EIO;
+ goto refresh_done;
+ }
+
+ heal_frame = afr_frame_create(this, NULL);
+ if (!heal_frame) {
+ err = EIO;
+ goto refresh_done;
+ }
+ heal_local = heal_frame->local;
+ heal_local->xdata_req = dict_new();
+ if (!heal_local->xdata_req) {
+ err = EIO;
+ AFR_STACK_DESTROY(heal_frame);
+ goto refresh_done;
+ }
+ heal_local->heal_frame = frame;
+ ret = synctask_new(this->ctx->env, afr_fav_child_reset_sink_xattrs,
+ afr_fav_child_reset_sink_xattrs_cbk, heal_frame,
+ heal_frame);
+ return 0;
+ }
- afr_selfheal_unlocked_discover (frame, local->refreshinode,
- local->refreshinode->gfid,
- local->replies);
+refresh_done:
+ afr_local_replies_wipe(local, this->private);
+ local->refreshfn(frame, this, err);
- afr_replies_interpret (frame, this, local->refreshinode);
+ return 0;
+}
- err = afr_inode_refresh_err (frame, this);
+int
+afr_inode_refresh_done(call_frame_t *frame, xlator_t *this, int error)
+{
+ call_frame_t *heal_frame = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ gf_boolean_t start_heal = _gf_false;
+ afr_local_t *heal_local = NULL;
+ unsigned char *success_replies = NULL;
+ int ret = 0;
+
+ if (error != 0) {
+ goto refresh_done;
+ }
+
+ local = frame->local;
+ priv = this->private;
+ success_replies = alloca0(priv->child_count);
+ afr_fill_success_replies(local, priv, success_replies);
+
+ if (priv->thin_arbiter_count && local->is_read_txn &&
+ AFR_COUNT(success_replies, priv->child_count) != priv->child_count) {
+ /* We need to query the good bricks and/or thin-arbiter.*/
+ if (success_replies[0]) {
+ local->read_txn_query_child = AFR_CHILD_ZERO;
+ } else if (success_replies[1]) {
+ local->read_txn_query_child = AFR_CHILD_ONE;
+ }
+ error = EINVAL;
+ goto refresh_done;
+ }
+
+ if (!afr_has_quorum(success_replies, this, frame)) {
+ error = afr_final_errno(frame->local, this->private);
+ if (!error)
+ error = afr_quorum_errno(priv);
+ goto refresh_done;
+ }
+
+ ret = afr_replies_interpret(frame, this, local->refreshinode, &start_heal);
+
+ if (ret && afr_selfheal_enabled(this) && start_heal) {
+ heal_frame = afr_frame_create(this, NULL);
+ if (!heal_frame)
+ goto refresh_done;
+ heal_local = heal_frame->local;
+ heal_local->refreshinode = inode_ref(local->refreshinode);
+ heal_local->heal_frame = heal_frame;
+ if (!afr_throttled_selfheal(heal_frame, this)) {
+ AFR_STACK_DESTROY(heal_frame);
+ goto refresh_done;
+ }
+ }
+
+refresh_done:
+ afr_txn_refresh_done(frame, this, error);
+
+ return 0;
+}
- afr_local_replies_wipe (local, this->private);
+void
+afr_inode_refresh_subvol_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *buf,
+ dict_t *xdata, struct iatt *par)
+{
+ afr_local_t *local = NULL;
+ int call_child = (long)cookie;
+ int8_t need_heal = 1;
+ int call_count = 0;
+ int ret = 0;
+
+ local = frame->local;
+ local->replies[call_child].valid = 1;
+ local->replies[call_child].op_ret = op_ret;
+ local->replies[call_child].op_errno = op_errno;
+ if (op_ret != -1) {
+ local->replies[call_child].poststat = *buf;
+ if (par)
+ local->replies[call_child].postparent = *par;
+ if (xdata)
+ local->replies[call_child].xdata = dict_ref(xdata);
+ }
- local->refreshfn (frame, this, err);
+ if (xdata) {
+ ret = dict_get_int8(xdata, "link-count", &need_heal);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to get link count");
+ }
+ }
- return 0;
+ local->replies[call_child].need_heal = need_heal;
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ afr_set_need_heal(this, local);
+ ret = afr_inode_refresh_err(frame, this);
+ if (ret) {
+ gf_msg_debug(this->name, ret, "afr_inode_refresh_err failed");
+ }
+ afr_inode_refresh_done(frame, this, ret);
+ }
}
-
-gf_boolean_t
-afr_selfheal_enabled (xlator_t *this)
+int
+afr_inode_refresh_subvol_with_lookup_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int op_ret,
+ int op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata,
+ struct iatt *par)
{
- afr_private_t *priv = NULL;
- gf_boolean_t data = _gf_false;
- int ret = 0;
-
- priv = this->private;
-
- ret = gf_string2boolean (priv->data_self_heal, &data);
- GF_ASSERT (!ret);
-
- return data || priv->metadata_self_heal || priv->entry_self_heal;
+ afr_inode_refresh_subvol_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ xdata, par);
+ return 0;
}
-
int
-afr_inode_refresh_done (call_frame_t *frame, xlator_t *this)
+afr_inode_refresh_subvol_with_lookup(call_frame_t *frame, xlator_t *this, int i,
+ inode_t *inode, uuid_t gfid, dict_t *xdata)
{
- call_frame_t *heal = NULL;
- afr_local_t *local = NULL;
- int ret = 0;
- int err = 0;
-
- local = frame->local;
-
- ret = afr_replies_interpret (frame, this, local->refreshinode);
+ loc_t loc = {
+ 0,
+ };
+ afr_private_t *priv = NULL;
- err = afr_inode_refresh_err (frame, this);
+ priv = this->private;
- afr_local_replies_wipe (local, this->private);
+ loc.inode = inode;
+ if (gf_uuid_is_null(inode->gfid) && gfid) {
+ /* To handle setattr/setxattr on yet to be linked inode from
+ * dht */
+ gf_uuid_copy(loc.gfid, gfid);
+ } else {
+ gf_uuid_copy(loc.gfid, inode->gfid);
+ }
- if (ret && afr_selfheal_enabled (this)) {
- heal = copy_frame (frame);
- if (heal)
- heal->root->pid = GF_CLIENT_PID_AFR_SELF_HEALD;
- ret = synctask_new (this->ctx->env, afr_refresh_selfheal_wrap,
- afr_refresh_selfheal_done, heal, frame);
- if (ret)
- goto refresh_done;
- } else {
- refresh_done:
- local->refreshfn (frame, this, err);
- }
-
- return 0;
+ STACK_WIND_COOKIE(frame, afr_inode_refresh_subvol_with_lookup_cbk,
+ (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->lookup, &loc, xdata);
+ return 0;
}
-
int
-afr_inode_refresh_subvol_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, inode_t *inode,
- struct iatt *buf, dict_t *xdata, struct iatt *par)
+afr_inode_refresh_subvol_with_fstat_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_child = (long) cookie;
- int call_count = 0;
-
- local = frame->local;
-
- local->replies[call_child].valid = 1;
- local->replies[call_child].op_ret = op_ret;
- local->replies[call_child].op_errno = op_errno;
- if (op_ret != -1) {
- local->replies[call_child].poststat = *buf;
- local->replies[call_child].postparent = *par;
- local->replies[call_child].xdata = dict_ref (xdata);
- }
-
- call_count = afr_frame_return (frame);
-
- if (call_count == 0)
- afr_inode_refresh_done (frame, this);
-
- return 0;
+ afr_inode_refresh_subvol_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ xdata, NULL);
+ return 0;
}
-
int
-afr_inode_refresh_subvol (call_frame_t *frame, xlator_t *this, int i,
- inode_t *inode, dict_t *xdata)
+afr_inode_refresh_subvol_with_fstat(call_frame_t *frame, xlator_t *this, int i,
+ dict_t *xdata)
{
- loc_t loc = {0, };
- afr_private_t *priv = NULL;
-
- priv = this->private;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
- loc.inode = inode;
- gf_uuid_copy (loc.gfid, inode->gfid);
+ priv = this->private;
+ local = frame->local;
- STACK_WIND_COOKIE (frame, afr_inode_refresh_subvol_cbk,
- (void *) (long) i, priv->children[i],
- priv->children[i]->fops->lookup, &loc, xdata);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_inode_refresh_subvol_with_fstat_cbk,
+ (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->fstat, local->fd, xdata);
+ return 0;
}
-
int
-afr_inode_refresh_do (call_frame_t *frame, xlator_t *this)
+afr_inode_refresh_do(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = 0;
- int i = 0;
- dict_t *xdata = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+ int i = 0;
+ int ret = 0;
+ dict_t *xdata = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ unsigned char *wind_subvols = NULL;
- priv = this->private;
- local = frame->local;
+ priv = this->private;
+ local = frame->local;
+ wind_subvols = alloca0(priv->child_count);
- afr_local_replies_wipe (local, priv);
+ afr_local_replies_wipe(local, priv);
- xdata = dict_new ();
- if (!xdata) {
- afr_inode_refresh_done (frame, this);
- return 0;
- }
+ if (local->fd) {
+ fd_ctx = afr_fd_ctx_get(local->fd, this);
+ if (!fd_ctx) {
+ afr_inode_refresh_done(frame, this, EINVAL);
+ return 0;
+ }
+ }
- if (afr_xattr_req_prepare (this, xdata) != 0) {
- dict_unref (xdata);
- afr_inode_refresh_done (frame, this);
- return 0;
- }
+ xdata = dict_new();
+ if (!xdata) {
+ afr_inode_refresh_done(frame, this, ENOMEM);
+ return 0;
+ }
- local->call_count = AFR_COUNT (local->child_up, priv->child_count);
+ ret = afr_xattr_req_prepare(this, xdata);
+ if (ret != 0) {
+ dict_unref(xdata);
+ afr_inode_refresh_done(frame, this, -ret);
+ return 0;
+ }
- call_count = local->call_count;
- for (i = 0; i < priv->child_count; i++) {
- if (!local->child_up[i])
- continue;
+ ret = dict_set_sizen_str_sizen(xdata, "link-count", GF_XATTROP_INDEX_COUNT);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
+ }
- afr_inode_refresh_subvol (frame, this, i, local->refreshinode,
- xdata);
+ ret = dict_set_str_sizen(xdata, GLUSTERFS_INODELK_DOM_COUNT, this->name);
+ if (ret) {
+ gf_msg_debug(this->name, -ret,
+ "Unable to set inodelk-dom-count in dict ");
+ }
- if (!--call_count)
- break;
- }
+ if (local->fd) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i] && fd_ctx->opened_on[i] == AFR_FD_OPENED)
+ wind_subvols[i] = 1;
+ }
+ } else {
+ memcpy(wind_subvols, local->child_up,
+ sizeof(*local->child_up) * priv->child_count);
+ }
+
+ local->call_count = AFR_COUNT(wind_subvols, priv->child_count);
+
+ call_count = local->call_count;
+ if (!call_count) {
+ dict_unref(xdata);
+ if (local->fd && AFR_COUNT(local->child_up, priv->child_count))
+ afr_inode_refresh_done(frame, this, EBADFD);
+ else
+ afr_inode_refresh_done(frame, this, ENOTCONN);
+ return 0;
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!wind_subvols[i])
+ continue;
+
+ if (local->fd)
+ afr_inode_refresh_subvol_with_fstat(frame, this, i, xdata);
+ else
+ afr_inode_refresh_subvol_with_lookup(
+ frame, this, i, local->refreshinode, local->refreshgfid, xdata);
- dict_unref (xdata);
+ if (!--call_count)
+ break;
+ }
- return 0;
-}
+ dict_unref(xdata);
+ return 0;
+}
int
-afr_inode_refresh (call_frame_t *frame, xlator_t *this, inode_t *inode,
- afr_inode_refresh_cbk_t refreshfn)
+afr_inode_refresh(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ uuid_t gfid, afr_inode_refresh_cbk_t refreshfn)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- local->refreshfn = refreshfn;
+ local->refreshfn = refreshfn;
- if (local->refreshinode) {
- inode_unref (local->refreshinode);
- local->refreshinode = NULL;
- }
+ if (local->refreshinode) {
+ inode_unref(local->refreshinode);
+ local->refreshinode = NULL;
+ }
- local->refreshinode = inode_ref (inode);
+ local->refreshinode = inode_ref(inode);
- afr_inode_refresh_do (frame, this);
+ if (gfid)
+ gf_uuid_copy(local->refreshgfid, gfid);
+ else
+ gf_uuid_clear(local->refreshgfid);
- return 0;
-}
+ afr_inode_refresh_do(frame, this);
+ return 0;
+}
int
-afr_xattr_req_prepare (xlator_t *this, dict_t *xattr_req)
+afr_xattr_req_prepare(xlator_t *this, dict_t *xattr_req)
{
- int i = 0;
- afr_private_t *priv = NULL;
- int ret = 0;
-
- priv = this->private;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ int ret = 0;
- for (i = 0; i < priv->child_count; i++) {
- ret = dict_set_uint64 (xattr_req, priv->pending_key[i],
- AFR_NUM_CHANGE_LOGS * sizeof(int));
- if (ret < 0)
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Unable to set dict value for %s",
- priv->pending_key[i]);
- /* 3 = data+metadata+entry */
- }
- ret = dict_set_uint64 (xattr_req, AFR_DIRTY,
- AFR_NUM_CHANGE_LOGS * sizeof(int));
- if (ret) {
- gf_msg_debug (this->name, -ret, "failed to set dirty "
- "query flag");
- }
+ priv = this->private;
- ret = dict_set_int32 (xattr_req, "list-xattr", 1);
- if (ret) {
- gf_msg_debug (this->name, -ret,
- "Unable to set list-xattr in dict ");
- }
+ for (i = 0; i < priv->child_count; i++) {
+ ret = dict_set_uint64(xattr_req, priv->pending_key[i],
+ AFR_NUM_CHANGE_LOGS * sizeof(int));
+ if (ret < 0)
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Unable to set dict value for %s", priv->pending_key[i]);
+ /* 3 = data+metadata+entry */
+ }
+ ret = dict_set_uint64(xattr_req, AFR_DIRTY,
+ AFR_NUM_CHANGE_LOGS * sizeof(int));
+ if (ret) {
+ gf_msg_debug(this->name, -ret,
+ "failed to set dirty "
+ "query flag");
+ }
+
+ ret = dict_set_int32_sizen(xattr_req, "list-xattr", 1);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set list-xattr in dict ");
+ }
+
+ return ret;
+}
- return ret;
+int
+afr_lookup_xattr_req_prepare(afr_local_t *local, xlator_t *this,
+ dict_t *xattr_req, loc_t *loc)
+{
+ int ret = -ENOMEM;
+
+ if (!local->xattr_req)
+ local->xattr_req = dict_new();
+
+ if (!local->xattr_req)
+ goto out;
+
+ if (xattr_req && (xattr_req != local->xattr_req))
+ dict_copy(xattr_req, local->xattr_req);
+
+ ret = afr_xattr_req_prepare(this, local->xattr_req);
+
+ ret = dict_set_uint64(local->xattr_req, GLUSTERFS_INODELK_COUNT, 0);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Unable to set dict value for %s", loc->path,
+ GLUSTERFS_INODELK_COUNT);
+ }
+ ret = dict_set_uint64(local->xattr_req, GLUSTERFS_ENTRYLK_COUNT, 0);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Unable to set dict value for %s", loc->path,
+ GLUSTERFS_ENTRYLK_COUNT);
+ }
+
+ ret = dict_set_uint32(local->xattr_req, GLUSTERFS_PARENT_ENTRYLK, 0);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Unable to set dict value for %s", loc->path,
+ GLUSTERFS_PARENT_ENTRYLK);
+ }
+
+ ret = dict_set_sizen_str_sizen(local->xattr_req, "link-count",
+ GF_XATTROP_INDEX_COUNT);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
+ }
+
+ ret = 0;
+out:
+ return ret;
}
int
-afr_lookup_xattr_req_prepare (afr_local_t *local, xlator_t *this,
- dict_t *xattr_req, loc_t *loc)
+afr_least_pending_reads_child(afr_private_t *priv, unsigned char *readable)
{
- int ret = -ENOMEM;
+ int i = 0;
+ int child = -1;
+ int64_t read_iter = -1;
+ int64_t pending_read = -1;
- if (!local->xattr_req)
- local->xattr_req = dict_new ();
-
- if (!local->xattr_req)
- goto out;
+ for (i = 0; i < priv->child_count; i++) {
+ if (AFR_IS_ARBITER_BRICK(priv, i) || !readable[i])
+ continue;
+ read_iter = GF_ATOMIC_GET(priv->pending_reads[i]);
+ if (child == -1 || read_iter < pending_read) {
+ pending_read = read_iter;
+ child = i;
+ }
+ }
- if (xattr_req && (xattr_req != local->xattr_req))
- dict_copy (xattr_req, local->xattr_req);
+ return child;
+}
- ret = afr_xattr_req_prepare (this, local->xattr_req);
+static int32_t
+afr_least_latency_child(afr_private_t *priv, unsigned char *readable)
+{
+ int32_t i = 0;
+ int child = -1;
- ret = dict_set_uint64 (local->xattr_req, GLUSTERFS_INODELK_COUNT, 0);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "%s: Unable to set dict value for %s",
- loc->path, GLUSTERFS_INODELK_COUNT);
- }
- ret = dict_set_uint64 (local->xattr_req, GLUSTERFS_ENTRYLK_COUNT, 0);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "%s: Unable to set dict value for %s",
- loc->path, GLUSTERFS_ENTRYLK_COUNT);
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (AFR_IS_ARBITER_BRICK(priv, i) || !readable[i] ||
+ priv->child_latency[i] < 0)
+ continue;
- ret = dict_set_uint32 (local->xattr_req, GLUSTERFS_PARENT_ENTRYLK, 0);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "%s: Unable to set dict value for %s",
- loc->path, GLUSTERFS_PARENT_ENTRYLK);
+ if (child == -1 ||
+ priv->child_latency[i] < priv->child_latency[child]) {
+ child = i;
}
-
- ret = 0;
-out:
- return ret;
+ }
+ return child;
}
-
-int
-afr_hash_child (afr_read_subvol_args_t *args, int32_t child_count, int hashmode)
+static int32_t
+afr_least_latency_times_pending_reads_child(afr_private_t *priv,
+ unsigned char *readable)
{
- uuid_t gfid_copy = {0,};
- pid_t pid;
+ int32_t i = 0;
+ int child = -1;
+ int64_t pending_read = 0;
+ int64_t latency = -1;
+ int64_t least_latency = -1;
- if (!hashmode) {
- return -1;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (AFR_IS_ARBITER_BRICK(priv, i) || !readable[i] ||
+ priv->child_latency[i] < 0)
+ continue;
- gf_uuid_copy (gfid_copy, args->gfid);
+ pending_read = GF_ATOMIC_GET(priv->pending_reads[i]);
+ latency = (pending_read + 1) * priv->child_latency[i];
+
+ if (child == -1 || latency < least_latency) {
+ least_latency = latency;
+ child = i;
+ }
+ }
+ return child;
+}
- if ((hashmode > 1) && (args->ia_type != IA_IFDIR)) {
+int
+afr_hash_child(afr_read_subvol_args_t *args, afr_private_t *priv,
+ unsigned char *readable)
+{
+ uuid_t gfid_copy = {
+ 0,
+ };
+ pid_t pid;
+ int child = -1;
+
+ switch (priv->hash_mode) {
+ case AFR_READ_POLICY_FIRST_UP:
+ break;
+ case AFR_READ_POLICY_GFID_HASH:
+ gf_uuid_copy(gfid_copy, args->gfid);
+ child = SuperFastHash((char *)gfid_copy, sizeof(gfid_copy)) %
+ priv->child_count;
+ break;
+ case AFR_READ_POLICY_GFID_PID_HASH:
+ if (args->ia_type != IA_IFDIR) {
/*
* Why getpid? Because it's one of the cheapest calls
- * available - faster than gethostname etc. - and returns a
- * constant-length value that's sure to be shorter than a UUID.
- * It's still very unlikely to be the same across clients, so
- * it still provides good mixing. We're not trying for
- * perfection here. All we need is a low probability that
- * multiple clients won't converge on the same subvolume.
+ * available - faster than gethostname etc. - and
+ * returns a constant-length value that's sure to be
+ * shorter than a UUID. It's still very unlikely to be
+ * the same across clients, so it still provides good
+ * mixing. We're not trying for perfection here. All we
+ * need is a low probability that multiple clients
+ * will converge on the same subvolume.
*/
+ gf_uuid_copy(gfid_copy, args->gfid);
pid = getpid();
- memcpy (gfid_copy, &pid, sizeof(pid));
- }
-
- return SuperFastHash((char *)gfid_copy,
- sizeof(gfid_copy)) % child_count;
+ *(pid_t *)gfid_copy ^= pid;
+ }
+ child = SuperFastHash((char *)gfid_copy, sizeof(gfid_copy)) %
+ priv->child_count;
+ break;
+ case AFR_READ_POLICY_LESS_LOAD:
+ child = afr_least_pending_reads_child(priv, readable);
+ break;
+ case AFR_READ_POLICY_LEAST_LATENCY:
+ child = afr_least_latency_child(priv, readable);
+ break;
+ case AFR_READ_POLICY_LOAD_LATENCY_HYBRID:
+ child = afr_least_latency_times_pending_reads_child(priv, readable);
+ break;
+ }
+
+ return child;
}
-
int
-afr_read_subvol_select_by_policy (inode_t *inode, xlator_t *this,
- unsigned char *readable,
- afr_read_subvol_args_t *args)
+afr_read_subvol_select_by_policy(inode_t *inode, xlator_t *this,
+ unsigned char *readable,
+ afr_read_subvol_args_t *args)
{
- int i = 0;
- int read_subvol = -1;
- afr_private_t *priv = NULL;
- afr_read_subvol_args_t local_args = {0,};
+ int i = 0;
+ int read_subvol = -1;
+ afr_private_t *priv = NULL;
+ afr_read_subvol_args_t local_args = {
+ 0,
+ };
- priv = this->private;
+ priv = this->private;
- /* first preference - explicitly specified or local subvolume */
- if (priv->read_child >= 0 && readable[priv->read_child])
- return priv->read_child;
+ /* first preference - explicitly specified or local subvolume */
+ if (priv->read_child >= 0 && readable[priv->read_child])
+ return priv->read_child;
- if (inode_is_linked (inode)) {
- gf_uuid_copy (local_args.gfid, inode->gfid);
- local_args.ia_type = inode->ia_type;
- } else if (args) {
- local_args = *args;
- }
+ if (inode_is_linked(inode)) {
+ gf_uuid_copy(local_args.gfid, inode->gfid);
+ local_args.ia_type = inode->ia_type;
+ } else if (args) {
+ local_args = *args;
+ }
- /* second preference - use hashed mode */
- read_subvol = afr_hash_child (&local_args, priv->child_count,
- priv->hash_mode);
- if (read_subvol >= 0 && readable[read_subvol])
- return read_subvol;
+ /* second preference - use hashed mode */
+ read_subvol = afr_hash_child(&local_args, priv, readable);
+ if (read_subvol >= 0 && readable[read_subvol])
+ return read_subvol;
- for (i = 0; i < priv->child_count; i++) {
- if (readable[i])
- return i;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (readable[i])
+ return i;
+ }
- /* no readable subvolumes, either split brain or all subvols down */
+ /* no readable subvolumes, either split brain or all subvols down */
- return -1;
+ return -1;
}
-
int
-afr_inode_read_subvol_type_get (inode_t *inode, xlator_t *this,
- unsigned char *readable, int *event_p,
- int type)
+afr_inode_read_subvol_type_get(inode_t *inode, xlator_t *this,
+ unsigned char *readable, int *event_p, int type)
{
- int ret = -1;
+ int ret = -1;
- if (type == AFR_METADATA_TRANSACTION)
- ret = afr_inode_read_subvol_get (inode, this, 0, readable,
- event_p);
- else
- ret = afr_inode_read_subvol_get (inode, this, readable, 0,
- event_p);
- return ret;
+ if (type == AFR_METADATA_TRANSACTION)
+ ret = afr_inode_read_subvol_get(inode, this, 0, readable, event_p);
+ else
+ ret = afr_inode_read_subvol_get(inode, this, readable, 0, event_p);
+ return ret;
}
+void
+afr_readables_intersect_get(inode_t *inode, xlator_t *this, int *event,
+ unsigned char *intersection)
+{
+ afr_private_t *priv = NULL;
+ unsigned char *data_readable = NULL;
+ unsigned char *metadata_readable = NULL;
+ unsigned char *intersect = NULL;
+
+ priv = this->private;
+ data_readable = alloca0(priv->child_count);
+ metadata_readable = alloca0(priv->child_count);
+ intersect = alloca0(priv->child_count);
+
+ afr_inode_read_subvol_get(inode, this, data_readable, metadata_readable,
+ event);
+
+ AFR_INTERSECT(intersect, data_readable, metadata_readable,
+ priv->child_count);
+ if (intersection)
+ memcpy(intersection, intersect,
+ sizeof(*intersection) * priv->child_count);
+}
int
-afr_read_subvol_get (inode_t *inode, xlator_t *this, int *subvol_p,
- int *event_p, afr_transaction_type type,
- afr_read_subvol_args_t *args)
+afr_read_subvol_get(inode_t *inode, xlator_t *this, int *subvol_p,
+ unsigned char *readables, int *event_p,
+ afr_transaction_type type, afr_read_subvol_args_t *args)
{
- afr_private_t *priv = NULL;
- unsigned char *data_readable = NULL;
- unsigned char *metadata_readable = NULL;
- unsigned char *readable = NULL;
- unsigned char *intersection = NULL;
- int subvol = -1;
- int event = 0;
-
- priv = this->private;
+ afr_private_t *priv = NULL;
+ unsigned char *readable = NULL;
+ unsigned char *intersection = NULL;
+ int subvol = -1;
+ int event = 0;
- readable = alloca0 (priv->child_count);
- data_readable = alloca0 (priv->child_count);
- metadata_readable = alloca0 (priv->child_count);
- intersection = alloca0 (priv->child_count);
+ priv = this->private;
- afr_inode_read_subvol_type_get (inode, this, readable, &event, type);
+ readable = alloca0(priv->child_count);
+ intersection = alloca0(priv->child_count);
- afr_inode_read_subvol_get (inode, this, data_readable, metadata_readable,
- &event);
+ afr_inode_read_subvol_type_get(inode, this, readable, &event, type);
- AFR_INTERSECT (intersection, data_readable, metadata_readable,
- priv->child_count);
+ afr_readables_intersect_get(inode, this, &event, intersection);
- if (AFR_COUNT (intersection, priv->child_count) > 0)
- subvol = afr_read_subvol_select_by_policy (inode, this,
- intersection, args);
- else
- subvol = afr_read_subvol_select_by_policy (inode, this,
- readable, args);
- if (subvol_p)
- *subvol_p = subvol;
- if (event_p)
- *event_p = event;
- return subvol;
+ if (AFR_COUNT(intersection, priv->child_count) > 0)
+ subvol = afr_read_subvol_select_by_policy(inode, this, intersection,
+ args);
+ else
+ subvol = afr_read_subvol_select_by_policy(inode, this, readable, args);
+ if (subvol_p)
+ *subvol_p = subvol;
+ if (event_p)
+ *event_p = event;
+ if (readables)
+ memcpy(readables, readable, sizeof(*readables) * priv->child_count);
+ return subvol;
}
-
void
-afr_local_transaction_cleanup (afr_local_t *local, xlator_t *this)
+afr_local_transaction_cleanup(afr_local_t *local, xlator_t *this)
{
- afr_private_t *priv = NULL;
- int i = 0;
-
- priv = this->private;
+ afr_private_t *priv = NULL;
+ int i = 0;
- afr_matrix_cleanup (local->pending, priv->child_count);
+ priv = this->private;
- GF_FREE (local->internal_lock.locked_nodes);
+ afr_matrix_cleanup(local->pending, priv->child_count);
- for (i = 0; local->internal_lock.inodelk[i].domain; i++) {
- GF_FREE (local->internal_lock.inodelk[i].locked_nodes);
- }
+ GF_FREE(local->internal_lock.lower_locked_nodes);
- GF_FREE (local->internal_lock.lower_locked_nodes);
+ afr_lockees_cleanup(&local->internal_lock);
- afr_entry_lockee_cleanup (&local->internal_lock);
+ GF_FREE(local->transaction.pre_op);
- GF_FREE (local->transaction.pre_op);
-
- GF_FREE (local->transaction.pre_op_sources);
- if (local->transaction.pre_op_xdata) {
- for (i = 0; i < priv->child_count; i++) {
- if (!local->transaction.pre_op_xdata[i])
- continue;
- dict_unref (local->transaction.pre_op_xdata[i]);
- }
- GF_FREE (local->transaction.pre_op_xdata);
+ GF_FREE(local->transaction.pre_op_sources);
+ if (local->transaction.changelog_xdata) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.changelog_xdata[i])
+ continue;
+ dict_unref(local->transaction.changelog_xdata[i]);
}
+ GF_FREE(local->transaction.changelog_xdata);
+ }
- GF_FREE (local->transaction.eager_lock);
- GF_FREE (local->transaction.fop_subvols);
- GF_FREE (local->transaction.failed_subvols);
+ GF_FREE(local->transaction.failed_subvols);
- GF_FREE (local->transaction.basename);
- GF_FREE (local->transaction.new_basename);
-
- loc_wipe (&local->transaction.parent_loc);
- loc_wipe (&local->transaction.new_parent_loc);
+ GF_FREE(local->transaction.basename);
+ GF_FREE(local->transaction.new_basename);
+ loc_wipe(&local->transaction.parent_loc);
+ loc_wipe(&local->transaction.new_parent_loc);
}
+void
+afr_reply_wipe(struct afr_reply *reply)
+{
+ if (reply->xdata) {
+ dict_unref(reply->xdata);
+ reply->xdata = NULL;
+ }
+
+ if (reply->xattr) {
+ dict_unref(reply->xattr);
+ reply->xattr = NULL;
+ }
+}
void
-afr_replies_wipe (struct afr_reply *replies, int count)
+afr_replies_wipe(struct afr_reply *replies, int count)
{
- int i = 0;
+ int i = 0;
- for (i = 0; i < count; i++) {
- if (replies[i].xdata) {
- dict_unref (replies[i].xdata);
- replies[i].xdata = NULL;
- }
- }
+ for (i = 0; i < count; i++) {
+ afr_reply_wipe(&replies[i]);
+ }
}
void
-afr_local_replies_wipe (afr_local_t *local, afr_private_t *priv)
+afr_local_replies_wipe(afr_local_t *local, afr_private_t *priv)
{
+ if (!local->replies)
+ return;
- if (!local->replies)
- return;
+ afr_replies_wipe(local->replies, priv->child_count);
- afr_replies_wipe (local->replies, priv->child_count);
+ memset(local->replies, 0, sizeof(*local->replies) * priv->child_count);
+}
- memset (local->replies, 0, sizeof(*local->replies) * priv->child_count);
+static gf_boolean_t
+afr_fop_lock_is_unlock(call_frame_t *frame)
+{
+ afr_local_t *local = frame->local;
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ if ((F_UNLCK == local->cont.inodelk.in_flock.l_type) &&
+ (local->cont.inodelk.in_cmd == F_SETLKW ||
+ local->cont.inodelk.in_cmd == F_SETLK))
+ return _gf_true;
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ if (ENTRYLK_UNLOCK == local->cont.entrylk.in_cmd)
+ return _gf_true;
+ break;
+ default:
+ return _gf_false;
+ }
+ return _gf_false;
}
-void
-afr_remove_eager_lock_stub (afr_local_t *local)
-{
- LOCK (&local->fd->lock);
- {
- list_del_init (&local->transaction.eager_locked);
- }
- UNLOCK (&local->fd->lock);
+static gf_boolean_t
+afr_lk_is_unlock(int32_t cmd, struct gf_flock *flock)
+{
+ switch (cmd) {
+ case F_RESLK_UNLCK:
+ return _gf_true;
+ break;
+
+#if F_SETLKW != F_SETLKW64
+ case F_SETLKW64:
+#endif
+ case F_SETLKW:
+
+#if F_SETLK != F_SETLK64
+ case F_SETLK64:
+#endif
+ case F_SETLK:
+ if (F_UNLCK == flock->l_type)
+ return _gf_true;
+ break;
+ default:
+ return _gf_false;
+ }
+ return _gf_false;
}
void
-afr_local_cleanup (afr_local_t *local, xlator_t *this)
+afr_handle_inconsistent_fop(call_frame_t *frame, int32_t *op_ret,
+ int32_t *op_errno)
{
- afr_private_t * priv = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
- if (!local)
- return;
+ if (!frame || !frame->this || !frame->local || !frame->this->private)
+ return;
- syncbarrier_destroy (&local->barrier);
+ if (*op_ret < 0)
+ return;
- if (local->transaction.eager_lock_on &&
- !list_empty (&local->transaction.eager_locked))
- afr_remove_eager_lock_stub (local);
+ /* Failing inodelk/entrylk/lk here is not a good idea because we
+ * need to cleanup the locks on the other bricks if we choose to fail
+ * the fop here. The brick may go down just after unwind happens as well
+ * so anyways the fop will fail when the next fop is sent so leaving
+ * it like this for now.*/
+ local = frame->local;
+ switch (local->op) {
+ case GF_FOP_LOOKUP:
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ case GF_FOP_LK:
+ return;
+ default:
+ break;
+ }
- afr_local_transaction_cleanup (local, this);
+ priv = frame->this->private;
+ if (!priv->consistent_io)
+ return;
- priv = this->private;
+ if (local->event_generation &&
+ (local->event_generation != priv->event_generation))
+ goto inconsistent;
- loc_wipe (&local->loc);
- loc_wipe (&local->newloc);
+ return;
+inconsistent:
+ *op_ret = -1;
+ *op_errno = ENOTCONN;
+}
- if (local->fd)
- fd_unref (local->fd);
+void
+afr_local_cleanup(afr_local_t *local, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
- if (local->xattr_req)
- dict_unref (local->xattr_req);
+ if (!local)
+ return;
- if (local->dict)
- dict_unref (local->dict);
+ syncbarrier_destroy(&local->barrier);
- afr_local_replies_wipe (local, priv);
- GF_FREE(local->replies);
+ afr_local_transaction_cleanup(local, this);
- GF_FREE (local->child_up);
+ priv = this->private;
- GF_FREE (local->read_attempted);
+ loc_wipe(&local->loc);
+ loc_wipe(&local->newloc);
- GF_FREE (local->readable);
+ if (local->fd)
+ fd_unref(local->fd);
- if (local->inode)
- inode_unref (local->inode);
+ if (local->xattr_req)
+ dict_unref(local->xattr_req);
- if (local->parent)
- inode_unref (local->parent);
+ if (local->xattr_rsp)
+ dict_unref(local->xattr_rsp);
- if (local->parent2)
- inode_unref (local->parent2);
+ if (local->dict)
+ dict_unref(local->dict);
- if (local->refreshinode)
- inode_unref (local->refreshinode);
+ afr_local_replies_wipe(local, priv);
+ GF_FREE(local->replies);
- { /* getxattr */
- GF_FREE (local->cont.getxattr.name);
- }
+ GF_FREE(local->child_up);
- { /* lk */
- GF_FREE (local->cont.lk.locked_nodes);
- }
+ GF_FREE(local->read_attempted);
+
+ GF_FREE(local->readable);
+ GF_FREE(local->readable2);
+
+ if (local->inode)
+ inode_unref(local->inode);
- { /* create */
- if (local->cont.create.fd)
- fd_unref (local->cont.create.fd);
- if (local->cont.create.params)
- dict_unref (local->cont.create.params);
- }
+ if (local->parent)
+ inode_unref(local->parent);
- { /* mknod */
- if (local->cont.mknod.params)
- dict_unref (local->cont.mknod.params);
- }
+ if (local->parent2)
+ inode_unref(local->parent2);
- { /* mkdir */
- if (local->cont.mkdir.params)
- dict_unref (local->cont.mkdir.params);
- }
+ if (local->refreshinode)
+ inode_unref(local->refreshinode);
- { /* symlink */
- if (local->cont.symlink.params)
- dict_unref (local->cont.symlink.params);
- }
+ { /* getxattr */
+ GF_FREE(local->cont.getxattr.name);
+ }
- { /* writev */
- GF_FREE (local->cont.writev.vector);
- if (local->cont.writev.iobref)
- iobref_unref (local->cont.writev.iobref);
- }
+ { /* lk */
+ GF_FREE(local->cont.lk.locked_nodes);
+ GF_FREE(local->cont.lk.dom_locked_nodes);
+ GF_FREE(local->cont.lk.dom_lock_op_ret);
+ GF_FREE(local->cont.lk.dom_lock_op_errno);
+ }
- { /* setxattr */
- if (local->cont.setxattr.dict)
- dict_unref (local->cont.setxattr.dict);
- }
+ { /* create */
+ if (local->cont.create.fd)
+ fd_unref(local->cont.create.fd);
+ if (local->cont.create.params)
+ dict_unref(local->cont.create.params);
+ }
- { /* fsetxattr */
- if (local->cont.fsetxattr.dict)
- dict_unref (local->cont.fsetxattr.dict);
- }
+ { /* mknod */
+ if (local->cont.mknod.params)
+ dict_unref(local->cont.mknod.params);
+ }
- { /* removexattr */
- GF_FREE (local->cont.removexattr.name);
- }
- { /* xattrop */
- if (local->cont.xattrop.xattr)
- dict_unref (local->cont.xattrop.xattr);
- }
- { /* fxattrop */
- if (local->cont.fxattrop.xattr)
- dict_unref (local->cont.fxattrop.xattr);
- }
- { /* symlink */
- GF_FREE (local->cont.symlink.linkpath);
- }
+ { /* mkdir */
+ if (local->cont.mkdir.params)
+ dict_unref(local->cont.mkdir.params);
+ }
- { /* opendir */
- GF_FREE (local->cont.opendir.checksum);
- }
+ { /* symlink */
+ if (local->cont.symlink.params)
+ dict_unref(local->cont.symlink.params);
+ }
- { /* readdirp */
- if (local->cont.readdir.dict)
- dict_unref (local->cont.readdir.dict);
- }
+ { /* writev */
+ GF_FREE(local->cont.writev.vector);
+ if (local->cont.writev.iobref)
+ iobref_unref(local->cont.writev.iobref);
+ }
- { /* inodelk */
- GF_FREE (local->cont.inodelk.volume);
- }
+ { /* setxattr */
+ if (local->cont.setxattr.dict)
+ dict_unref(local->cont.setxattr.dict);
+ }
- if (local->xdata_req)
- dict_unref (local->xdata_req);
+ { /* fsetxattr */
+ if (local->cont.fsetxattr.dict)
+ dict_unref(local->cont.fsetxattr.dict);
+ }
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
-}
+ { /* removexattr */
+ GF_FREE(local->cont.removexattr.name);
+ }
+ { /* xattrop */
+ if (local->cont.xattrop.xattr)
+ dict_unref(local->cont.xattrop.xattr);
+ }
+ { /* symlink */
+ GF_FREE(local->cont.symlink.linkpath);
+ }
+ { /* opendir */
+ GF_FREE(local->cont.opendir.checksum);
+ }
+
+ { /* open */
+ if (local->cont.open.fd)
+ fd_unref(local->cont.open.fd);
+ }
+
+ { /* readdirp */
+ if (local->cont.readdir.dict)
+ dict_unref(local->cont.readdir.dict);
+ }
+
+ { /* inodelk */
+ GF_FREE(local->cont.inodelk.volume);
+ if (local->cont.inodelk.xdata)
+ dict_unref(local->cont.inodelk.xdata);
+ }
+
+ { /* entrylk */
+ GF_FREE(local->cont.entrylk.volume);
+ GF_FREE(local->cont.entrylk.basename);
+ if (local->cont.entrylk.xdata)
+ dict_unref(local->cont.entrylk.xdata);
+ }
+
+ if (local->xdata_req)
+ dict_unref(local->xdata_req);
+
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+}
int
-afr_frame_return (call_frame_t *frame)
+afr_frame_return(call_frame_t *frame)
{
- afr_local_t *local = NULL;
- int call_count = 0;
+ afr_local_t *local = NULL;
+ int call_count = 0;
- local = frame->local;
+ local = frame->local;
- LOCK (&frame->lock);
- {
- call_count = --local->call_count;
- }
- UNLOCK (&frame->lock);
+ LOCK(&frame->lock);
+ {
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
- return call_count;
+ return call_count;
}
+static char *afr_ignore_xattrs[] = {GF_SELINUX_XATTR_KEY, QUOTA_SIZE_KEY, NULL};
gf_boolean_t
-afr_is_entry_possibly_under_txn (afr_local_t *local, xlator_t *this)
+afr_is_xattr_ignorable(char *key)
{
- int i = 0;
- int tmp = 0;
- afr_private_t *priv = NULL;
-
- priv = this->private;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].xdata)
- continue;
- if (dict_get_int32 (local->replies[i].xdata,
- GLUSTERFS_PARENT_ENTRYLK,
- &tmp) == 0)
- if (tmp)
- return _gf_true;
- }
+ int i = 0;
- return _gf_false;
+ if (!strncmp(key, AFR_XATTR_PREFIX, SLEN(AFR_XATTR_PREFIX)))
+ return _gf_true;
+ for (i = 0; afr_ignore_xattrs[i]; i++) {
+ if (!strcmp(key, afr_ignore_xattrs[i]))
+ return _gf_true;
+ }
+ return _gf_false;
}
+static gf_boolean_t
+afr_xattr_match_needed(dict_t *this, char *key1, data_t *value1, void *data)
+{
+ /* Ignore all non-disk (i.e. virtual) xattrs right away. */
+ if (!gf_is_valid_xattr_namespace(key1))
+ return _gf_false;
+
+ /* Ignore on-disk xattrs that AFR doesn't need to heal. */
+ if (!afr_is_xattr_ignorable(key1))
+ return _gf_true;
-static char *afr_ignore_xattrs[] = {
- GLUSTERFS_OPEN_FD_COUNT,
- GLUSTERFS_PARENT_ENTRYLK,
- GLUSTERFS_ENTRYLK_COUNT,
- GLUSTERFS_INODELK_COUNT,
- GF_SELINUX_XATTR_KEY,
- QUOTA_SIZE_KEY,
- NULL
-};
+ return _gf_false;
+}
gf_boolean_t
-afr_is_xattr_ignorable (char *key)
+afr_xattrs_are_equal(dict_t *dict1, dict_t *dict2)
{
- int i = 0;
+ return are_dicts_equal(dict1, dict2, afr_xattr_match_needed, NULL);
+}
- if (!strncmp (key, AFR_XATTR_PREFIX, strlen(AFR_XATTR_PREFIX)))
- return _gf_true;
- for (i = 0; afr_ignore_xattrs[i]; i++) {
- if (!strcmp (key, afr_ignore_xattrs[i]))
- return _gf_true;
+static int
+afr_get_parent_read_subvol(xlator_t *this, inode_t *parent,
+ struct afr_reply *replies, unsigned char *readable)
+{
+ int i = 0;
+ int par_read_subvol = -1;
+ int par_read_subvol_iter = -1;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (parent)
+ par_read_subvol = afr_data_subvol_get(parent, this, NULL, NULL, NULL,
+ NULL);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+
+ if (replies[i].op_ret < 0)
+ continue;
+
+ if (par_read_subvol_iter == -1) {
+ par_read_subvol_iter = i;
+ continue;
}
- return _gf_false;
+
+ if ((par_read_subvol_iter != par_read_subvol) && readable[i])
+ par_read_subvol_iter = i;
+
+ if (i == par_read_subvol)
+ par_read_subvol_iter = i;
+ }
+ /* At the end of the for-loop, the only reason why @par_read_subvol_iter
+ * could be -1 is when this LOOKUP has failed on all sub-volumes.
+ * So it is okay to send an arbitrary subvolume (0 in this case)
+ * as parent read subvol.
+ */
+ if (par_read_subvol_iter == -1)
+ par_read_subvol_iter = 0;
+
+ return par_read_subvol_iter;
}
-static gf_boolean_t
-afr_xattr_match (dict_t *this, char *key1, data_t *value1, void *data)
+int
+afr_read_subvol_decide(inode_t *inode, xlator_t *this,
+ afr_read_subvol_args_t *args, unsigned char *readable)
{
- if (!afr_is_xattr_ignorable (key1))
- return _gf_true;
+ int event = 0;
+ afr_private_t *priv = NULL;
+ unsigned char *intersection = NULL;
- return _gf_false;
+ priv = this->private;
+ intersection = alloca0(priv->child_count);
+
+ afr_readables_intersect_get(inode, this, &event, intersection);
+
+ if (AFR_COUNT(intersection, priv->child_count) <= 0) {
+ /* TODO: If we have one brick with valid data_readable and
+ * another with metadata_readable, try to send an iatt with
+ * valid bits from both.*/
+ return -1;
+ }
+
+ memcpy(readable, intersection, sizeof(*readable) * priv->child_count);
+
+ return afr_read_subvol_select_by_policy(inode, this, intersection, args);
}
-gf_boolean_t
-afr_xattrs_are_equal (dict_t *dict1, dict_t *dict2)
+static inline int
+afr_first_up_child(call_frame_t *frame, xlator_t *this)
{
- return are_dicts_equal (dict1, dict2, afr_xattr_match, NULL);
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++)
+ if (local->replies[i].valid && local->replies[i].op_ret == 0)
+ return i;
+ return -1;
}
-static int
-afr_get_parent_read_subvol (xlator_t *this, inode_t *parent,
- struct afr_reply *replies, unsigned char *readable)
+static void
+afr_attempt_readsubvol_set(call_frame_t *frame, xlator_t *this,
+ unsigned char *success_replies,
+ unsigned char *data_readable, int *read_subvol)
{
- int i = 0;
- int par_read_subvol = -1;
- int par_read_subvol_iter = -1;
- afr_private_t *priv = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int spb_subvol = -1;
+ int child_count = -1;
- priv = this->private;
+ if (*read_subvol != -1)
+ return;
- if (parent)
- par_read_subvol = afr_data_subvol_get (parent, this, 0, 0,
- NULL);
+ priv = this->private;
+ local = frame->local;
+ child_count = priv->child_count;
+
+ afr_split_brain_read_subvol_get(local->inode, this, frame, &spb_subvol);
+ if ((spb_subvol >= 0) &&
+ (AFR_COUNT(success_replies, child_count) == child_count)) {
+ *read_subvol = spb_subvol;
+ } else if (!priv->quorum_count ||
+ frame->root->pid == GF_CLIENT_PID_GLFS_HEAL) {
+ *read_subvol = afr_first_up_child(frame, this);
+ } else if (priv->quorum_count &&
+ afr_has_quorum(data_readable, this, NULL)) {
+ /* read_subvol is guaranteed to be valid if we hit this path. */
+ *read_subvol = afr_first_up_child(frame, this);
+ } else {
+ /* If quorum is enabled and we do not have a
+ readable yet, it means all good copies are down.
+ */
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_READ_SUBVOL_ERROR,
+ "no read "
+ "subvols for %s",
+ local->loc.path);
+ }
+ if (*read_subvol >= 0)
+ dict_del_sizen(local->replies[*read_subvol].xdata, GF_CONTENT_KEY);
+}
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
+static void
+afr_lookup_done(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = -1;
+ int op_errno = 0;
+ int read_subvol = 0;
+ int par_read_subvol = 0;
+ int ret = -1;
+ unsigned char *readable = NULL;
+ unsigned char *success_replies = NULL;
+ int event = 0;
+ struct afr_reply *replies = NULL;
+ uuid_t read_gfid = {
+ 0,
+ };
+ gf_boolean_t locked_entry = _gf_false;
+ gf_boolean_t in_flight_create = _gf_false;
+ gf_boolean_t can_interpret = _gf_true;
+ inode_t *parent = NULL;
+ ia_type_t ia_type = IA_INVAL;
+ afr_read_subvol_args_t args = {
+ 0,
+ };
+ char *gfid_heal_msg = NULL;
+
+ priv = this->private;
+ local = frame->local;
+ replies = local->replies;
+ parent = local->loc.parent;
+
+ locked_entry = afr_is_possibly_under_txn(AFR_ENTRY_TRANSACTION, local,
+ this);
+
+ readable = alloca0(priv->child_count);
+ success_replies = alloca0(priv->child_count);
+
+ afr_inode_read_subvol_get(parent, this, readable, NULL, &event);
+ par_read_subvol = afr_get_parent_read_subvol(this, parent, replies,
+ readable);
+
+ /* First, check if we have a gfid-change from somewhere,
+ If so, propagate that so that a fresh lookup can be
+ issued
+ */
+ if (local->cont.lookup.needs_fresh_lookup) {
+ local->op_ret = -1;
+ local->op_errno = ESTALE;
+ goto error;
+ }
- if (replies[i].op_ret < 0)
- continue;
+ op_errno = afr_final_errno(frame->local, this->private);
+ local->op_errno = op_errno;
- if (par_read_subvol_iter == -1) {
- par_read_subvol_iter = i;
- continue;
- }
+ read_subvol = -1;
+ afr_fill_success_replies(local, priv, success_replies);
- if ((par_read_subvol_iter != par_read_subvol) && readable[i])
- par_read_subvol_iter = i;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
- if (i == par_read_subvol)
- par_read_subvol_iter = i;
+ if (replies[i].op_ret == -1) {
+ if (locked_entry && replies[i].op_errno == ENOENT) {
+ in_flight_create = _gf_true;
+ }
+ continue;
}
- /* At the end of the for-loop, the only reason why @par_read_subvol_iter
- * could be -1 is when this LOOKUP has failed on all sub-volumes.
- * So it is okay to send an arbitrary subvolume (0 in this case)
- * as parent read subvol.
- */
- if (par_read_subvol_iter == -1)
- par_read_subvol_iter = 0;
-
- return par_read_subvol_iter;
-}
+ if (read_subvol == -1 || !readable[read_subvol]) {
+ read_subvol = i;
+ gf_uuid_copy(read_gfid, replies[i].poststat.ia_gfid);
+ ia_type = replies[i].poststat.ia_type;
+ local->op_ret = 0;
+ }
+ }
-static void
-afr_lookup_done (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = -1;
- int op_errno = 0;
- int read_subvol = 0;
- int par_read_subvol = 0;
- unsigned char *readable = NULL;
- int event = 0;
- struct afr_reply *replies = NULL;
- uuid_t read_gfid = {0, };
- gf_boolean_t locked_entry = _gf_false;
- gf_boolean_t can_interpret = _gf_true;
- inode_t *parent = NULL;
- int spb_choice = -1;
- ia_type_t ia_type = IA_INVAL;
- afr_read_subvol_args_t args = {0,};
-
- priv = this->private;
- local = frame->local;
- replies = local->replies;
- parent = local->loc.parent;
-
- locked_entry = afr_is_entry_possibly_under_txn (local, this);
-
- readable = alloca0 (priv->child_count);
-
- afr_inode_read_subvol_get (parent, this, readable, NULL, &event);
-
- afr_inode_split_brain_choice_get (local->inode, this,
- &spb_choice);
- /* First, check if we have a gfid-change from somewhere,
- If so, propagate that so that a fresh lookup can be
- issued
- */
- if (local->cont.lookup.needs_fresh_lookup) {
- local->op_ret = -1;
- local->op_errno = ESTALE;
- goto unwind;
- }
-
- op_errno = afr_final_errno (frame->local, this->private);
- local->op_errno = op_errno;
-
- read_subvol = -1;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
-
- if (locked_entry && replies[i].op_ret == -1 &&
- replies[i].op_errno == ENOENT) {
- /* Second, check entry is still
- "underway" in creation */
- local->op_ret = -1;
- local->op_errno = ENOENT;
- goto unwind;
- }
-
- if (replies[i].op_ret == -1)
- continue;
-
- if (read_subvol == -1 || !readable[read_subvol]) {
- read_subvol = i;
- gf_uuid_copy (read_gfid, replies[i].poststat.ia_gfid);
- ia_type = replies[i].poststat.ia_type;
- local->op_ret = 0;
- }
- }
-
- if (read_subvol == -1)
- goto unwind;
- /* We now have a read_subvol, which is readable[] (if there
- were any). Next we look for GFID mismatches. We don't
- consider a GFID mismatch as an error if read_subvol is
- readable[] but the mismatching GFID subvol is not.
- */
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid || replies[i].op_ret == -1) {
- if (priv->child_up[i])
- can_interpret = _gf_false;
- continue;
- }
-
- if (!gf_uuid_compare (replies[i].poststat.ia_gfid, read_gfid))
- continue;
-
- can_interpret = _gf_false;
-
- if (locked_entry)
- continue;
-
- /* Now GFIDs mismatch. It's OK as long as this subvol
- is not readable[] but read_subvol is */
- if (readable[read_subvol] && !readable[i])
- continue;
-
- /* LOG ERROR */
- local->op_ret = -1;
- local->op_errno = EIO;
- goto unwind;
- }
-
- /* Forth, for the finalized GFID, pick the best subvolume
- to return stats from.
- */
- if (can_interpret) {
- /* It is safe to call afr_replies_interpret() because we have
- a response from all the UP subvolumes and all of them resolved
- to the same GFID
- */
- gf_uuid_copy (args.gfid, read_gfid);
- args.ia_type = ia_type;
- if (afr_replies_interpret (frame, this, local->inode)) {
- read_subvol = afr_data_subvol_get (local->inode, this,
- 0, 0, &args);
- afr_inode_read_subvol_reset (local->inode, this);
- goto cant_interpret;
- } else {
- read_subvol = afr_data_subvol_get (local->inode, this,
- 0, 0, &args);
- }
- } else {
- cant_interpret:
- if (read_subvol == -1) {
- if (spb_choice >= 0)
- read_subvol = spb_choice;
- else
- read_subvol = 0;
- }
- dict_del (replies[read_subvol].xdata, GF_CONTENT_KEY);
- }
+ if (in_flight_create && !afr_has_quorum(success_replies, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = ENOENT;
+ goto error;
+ }
+
+ if (read_subvol == -1)
+ goto error;
+ /* We now have a read_subvol, which is readable[] (if there
+ were any). Next we look for GFID mismatches. We don't
+ consider a GFID mismatch as an error if read_subvol is
+ readable[] but the mismatching GFID subvol is not.
+ */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1) {
+ continue;
+ }
+
+ if (!gf_uuid_compare(replies[i].poststat.ia_gfid, read_gfid))
+ continue;
+
+ can_interpret = _gf_false;
+
+ if (locked_entry)
+ continue;
+
+ /* Now GFIDs mismatch. It's OK as long as this subvol
+ is not readable[] but read_subvol is */
+ if (readable[read_subvol] && !readable[i])
+ continue;
+
+ /* If we were called from glfsheal and there is still a gfid
+ * mismatch, succeed the lookup and let glfsheal print the
+ * response via gfid-heal-msg.*/
+ if (!dict_get_str_sizen(local->xattr_req, "gfid-heal-msg",
+ &gfid_heal_msg))
+ goto cant_interpret;
+
+ /* LOG ERROR */
+ local->op_ret = -1;
+ local->op_errno = EIO;
+ goto error;
+ }
+
+ /* Fourth, for the finalized GFID, pick the best subvolume
+ to return stats from.
+ */
+ read_subvol = -1;
+ memset(readable, 0, sizeof(*readable) * priv->child_count);
+ if (can_interpret) {
+ if (!afr_has_quorum(success_replies, this, NULL))
+ goto cant_interpret;
+ /* It is safe to call afr_replies_interpret() because we have
+ a response from all the UP subvolumes and all of them resolved
+ to the same GFID
+ */
+ gf_uuid_copy(args.gfid, read_gfid);
+ args.ia_type = ia_type;
+ ret = afr_replies_interpret(frame, this, local->inode, NULL);
+ read_subvol = afr_read_subvol_decide(local->inode, this, &args,
+ readable);
+ if (read_subvol == -1)
+ goto cant_interpret;
+ if (ret) {
+ afr_inode_need_refresh_set(local->inode, this);
+ dict_del_sizen(local->replies[read_subvol].xdata, GF_CONTENT_KEY);
+ }
+ } else {
+ cant_interpret:
+ afr_attempt_readsubvol_set(frame, this, success_replies, readable,
+ &read_subvol);
+ if (read_subvol == -1) {
+ goto error;
+ }
+ }
- afr_handle_quota_size (frame, this);
+ afr_handle_quota_size(frame, this);
-unwind:
- if (read_subvol == -1) {
- if (spb_choice >= 0)
- read_subvol = spb_choice;
- else
- read_subvol = 0;
+ afr_set_need_heal(this, local);
+ if (AFR_IS_ARBITER_BRICK(priv, read_subvol) && local->op_ret == 0) {
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ gf_msg_debug(this->name, 0,
+ "Arbiter cannot be a read subvol "
+ "for %s",
+ local->loc.path);
+ goto error;
+ }
+
+ ret = dict_get_str_sizen(local->xattr_req, "gfid-heal-msg", &gfid_heal_msg);
+ if (!ret) {
+ ret = dict_set_str_sizen(local->replies[read_subvol].xdata,
+ "gfid-heal-msg", gfid_heal_msg);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_DICT_SET_FAILED,
+ "Error setting gfid-heal-msg dict");
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
}
- par_read_subvol = afr_get_parent_read_subvol (this, parent, replies,
- readable);
+ }
+
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno,
+ local->inode, &local->replies[read_subvol].poststat,
+ local->replies[read_subvol].xdata,
+ &local->replies[par_read_subvol].postparent);
+ return;
- AFR_STACK_UNWIND (lookup, frame, local->op_ret, local->op_errno,
- local->inode, &local->replies[read_subvol].poststat,
- local->replies[read_subvol].xdata,
- &local->replies[par_read_subvol].postparent);
+error:
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno, NULL, NULL,
+ NULL, NULL);
}
/*
@@ -1660,614 +3122,845 @@ unwind:
* others in that they must be given higher priority while
* returning to the user.
*
- * The hierarchy is ENODATA > ENOENT > ESTALE > others
+ * The hierarchy is ENODATA > ENOENT > ESTALE > ENOSPC > others
*/
int
-afr_higher_errno (int32_t old_errno, int32_t new_errno)
+afr_higher_errno(int32_t old_errno, int32_t new_errno)
{
- if (old_errno == ENODATA || new_errno == ENODATA)
- return ENODATA;
- if (old_errno == ENOENT || new_errno == ENOENT)
- return ENOENT;
- if (old_errno == ESTALE || new_errno == ESTALE)
- return ESTALE;
+ if (old_errno == ENODATA || new_errno == ENODATA)
+ return ENODATA;
+ if (old_errno == ENOENT || new_errno == ENOENT)
+ return ENOENT;
+ if (old_errno == ESTALE || new_errno == ESTALE)
+ return ESTALE;
+ if (old_errno == ENOSPC || new_errno == ENOSPC)
+ return ENOSPC;
- return new_errno;
+ return new_errno;
}
-
int
-afr_final_errno (afr_local_t *local, afr_private_t *priv)
+afr_final_errno(afr_local_t *local, afr_private_t *priv)
{
- int i = 0;
- int op_errno = 0;
- int tmp_errno = 0;
+ int i = 0;
+ int op_errno = 0;
+ int tmp_errno = 0;
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret >= 0)
- continue;
- tmp_errno = local->replies[i].op_errno;
- op_errno = afr_higher_errno (op_errno, tmp_errno);
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret >= 0)
+ continue;
+ tmp_errno = local->replies[i].op_errno;
+ op_errno = afr_higher_errno(op_errno, tmp_errno);
+ }
- return op_errno;
+ return op_errno;
}
static int32_t
-afr_local_discovery_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict,
- dict_t *xdata)
-{
- int ret = 0;
- char *pathinfo = NULL;
- gf_boolean_t is_local = _gf_false;
- afr_private_t *priv = NULL;
- int32_t child_index = -1;
-
- if (op_ret != 0) {
- goto out;
- }
-
- priv = this->private;
- child_index = (int32_t)(long)cookie;
-
- ret = dict_get_str (dict, GF_XATTR_PATHINFO_KEY, &pathinfo);
- if (ret != 0) {
- goto out;
- }
-
- ret = glusterfs_is_local_pathinfo (pathinfo, &is_local);
- if (ret) {
- goto out;
- }
-
- /*
- * Note that one local subvolume will override another here. The only
- * way to avoid that would be to retain extra information about whether
- * the previous read_child is local, and it's just not worth it. Even
- * the slowest local subvolume is far preferable to a remote one.
- */
- if (is_local) {
- /* Don't set arbiter as read child. */
- if (AFR_IS_ARBITER_BRICK(priv, child_index))
- goto out;
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_LOCAL_CHILD, "selecting local read_child %s",
- priv->children[child_index]->name);
-
- priv->read_child = child_index;
- }
+afr_local_discovery_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
+{
+ int ret = 0;
+ char *pathinfo = NULL;
+ gf_boolean_t is_local = _gf_false;
+ afr_private_t *priv = NULL;
+ int32_t child_index = -1;
+
+ if (op_ret != 0) {
+ goto out;
+ }
+
+ priv = this->private;
+ child_index = (int32_t)(long)cookie;
+
+ ret = dict_get_str_sizen(dict, GF_XATTR_PATHINFO_KEY, &pathinfo);
+ if (ret != 0) {
+ goto out;
+ }
+
+ ret = glusterfs_is_local_pathinfo(pathinfo, &is_local);
+ if (ret) {
+ goto out;
+ }
+
+ /*
+ * Note that one local subvolume will override another here. The only
+ * way to avoid that would be to retain extra information about whether
+ * the previous read_child is local, and it's just not worth it. Even
+ * the slowest local subvolume is far preferable to a remote one.
+ */
+ if (is_local) {
+ priv->local[child_index] = 1;
+ /* Don't set arbiter as read child. */
+ if (AFR_IS_ARBITER_BRICK(priv, child_index))
+ goto out;
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_LOCAL_CHILD,
+ "selecting local read_child %s",
+ priv->children[child_index]->name);
+
+ priv->read_child = child_index;
+ }
out:
- STACK_DESTROY(frame->root);
- return 0;
+ STACK_DESTROY(frame->root);
+ return 0;
}
static void
-afr_attempt_local_discovery (xlator_t *this, int32_t child_index)
+afr_attempt_local_discovery(xlator_t *this, int32_t child_index)
{
- call_frame_t *newframe = NULL;
- loc_t tmploc = {0,};
- afr_private_t *priv = this->private;
+ call_frame_t *newframe = NULL;
+ loc_t tmploc = {
+ 0,
+ };
+ afr_private_t *priv = this->private;
- newframe = create_frame(this,this->ctx->pool);
- if (!newframe) {
- return;
- }
+ newframe = create_frame(this, this->ctx->pool);
+ if (!newframe) {
+ return;
+ }
- tmploc.gfid[sizeof(tmploc.gfid)-1] = 1;
- STACK_WIND_COOKIE (newframe, afr_local_discovery_cbk,
- (void *)(long)child_index,
- priv->children[child_index],
- priv->children[child_index]->fops->getxattr,
- &tmploc, GF_XATTR_PATHINFO_KEY, NULL);
+ tmploc.gfid[sizeof(tmploc.gfid) - 1] = 1;
+ STACK_WIND_COOKIE(newframe, afr_local_discovery_cbk,
+ (void *)(long)child_index, priv->children[child_index],
+ priv->children[child_index]->fops->getxattr, &tmploc,
+ GF_XATTR_PATHINFO_KEY, NULL);
}
int
-afr_lookup_sh_metadata_wrap (void *opaque)
+afr_lookup_sh_metadata_wrap(void *opaque)
+{
+ call_frame_t *frame = opaque;
+ afr_local_t *local = NULL;
+ xlator_t *this = NULL;
+ inode_t *inode = NULL;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ int i = 0, first = -1;
+ int ret = -1;
+ dict_t *dict = NULL;
+
+ local = frame->local;
+ this = frame->this;
+ priv = this->private;
+ replies = local->replies;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ first = i;
+ break;
+ }
+ if (first == -1)
+ goto out;
+
+ if (afr_selfheal_metadata_by_stbuf(this, &replies[first].poststat))
+ goto out;
+
+ afr_local_replies_wipe(local, this->private);
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ if (local->xattr_req) {
+ dict_copy(local->xattr_req, dict);
+ }
+
+ ret = dict_set_sizen_str_sizen(dict, "link-count", GF_XATTROP_INDEX_COUNT);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
+ }
+
+ if (loc_is_nameless(&local->loc)) {
+ ret = afr_selfheal_unlocked_discover_on(frame, local->inode,
+ local->loc.gfid, local->replies,
+ local->child_up, dict);
+ } else {
+ inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
+ local->loc.name, local->replies,
+ local->child_up, dict);
+ }
+ if (inode)
+ inode_unref(inode);
+out:
+ if (loc_is_nameless(&local->loc))
+ afr_discover_done(frame, this);
+ else
+ afr_lookup_done(frame, this);
+
+ if (dict)
+ dict_unref(dict);
+
+ return 0;
+}
+
+gf_boolean_t
+afr_is_pending_set(xlator_t *this, dict_t *xdata, int type)
{
- call_frame_t *frame = opaque;
- afr_local_t *local = NULL;
- xlator_t *this = NULL;
- inode_t *inode = NULL;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- int i= 0, first = -1;
+ int idx = -1;
+ afr_private_t *priv = NULL;
+ void *pending_raw = NULL;
+ int *pending_int = NULL;
+ int i = 0;
- local = frame->local;
- this = frame->this;
- priv = this->private;
- replies = local->replies;
-
- for (i =0; i < priv->child_count; i++) {
- if(!replies[i].valid || replies[i].op_ret == -1)
- continue;
- first = i;
- break;
- }
- if (first == -1)
- goto out;
+ priv = this->private;
+ idx = afr_index_for_transaction_type(type);
- inode = afr_inode_link (local->inode,&replies[first].poststat);
- if(!inode)
- goto out;
+ if (dict_get_ptr(xdata, AFR_DIRTY, &pending_raw) == 0) {
+ if (pending_raw) {
+ pending_int = pending_raw;
+
+ if (ntoh32(pending_int[idx]))
+ return _gf_true;
+ }
+ }
- afr_selfheal_metadata (frame, this, inode);
- inode_forget (inode, 1);
- inode_unref (inode);
+ for (i = 0; i < priv->child_count; i++) {
+ if (dict_get_ptr(xdata, priv->pending_key[i], &pending_raw))
+ continue;
+ if (!pending_raw)
+ continue;
+ pending_int = pending_raw;
- afr_local_replies_wipe (local, this->private);
- inode = afr_selfheal_unlocked_lookup_on (frame, local->loc.parent,
- local->loc.name, local->replies,
- local->child_up, NULL);
- if (inode)
- inode_unref (inode);
-out:
- afr_lookup_done (frame, this);
+ if (ntoh32(pending_int[idx]))
+ return _gf_true;
+ }
- return 0;
+ return _gf_false;
}
static gf_boolean_t
afr_can_start_metadata_self_heal(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- int i = 0, first = -1;
- gf_boolean_t start = _gf_false;
- struct iatt stbuf = {0, };
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ int i = 0, first = -1;
+ gf_boolean_t start = _gf_false;
+ struct iatt stbuf = {
+ 0,
+ };
- local = frame->local;
- replies = local->replies;
- priv = this->private;
+ local = frame->local;
+ replies = local->replies;
+ priv = this->private;
- if (!priv->metadata_self_heal)
- return _gf_false;
+ if (!priv->metadata_self_heal)
+ return _gf_false;
- for (i = 0; i < priv->child_count; i++) {
- if(!replies[i].valid || replies[i].op_ret == -1)
- continue;
- if (first == -1) {
- first = i;
- stbuf = replies[i].poststat;
- continue;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ if (first == -1) {
+ first = i;
+ stbuf = replies[i].poststat;
+ continue;
+ }
- if (gf_uuid_compare (stbuf.ia_gfid, replies[i].poststat.ia_gfid)) {
- start = _gf_false;
- break;
- }
- if (!IA_EQUAL (stbuf, replies[i].poststat, type)) {
- start = _gf_false;
- break;
- }
+ if (afr_is_pending_set(this, replies[i].xdata,
+ AFR_METADATA_TRANSACTION)) {
+ /* Let shd do the heal so that lookup is not blocked
+ * on getting metadata lock/doing the heal */
+ start = _gf_false;
+ break;
+ }
- /*Check if iattrs need heal*/
- if ((!IA_EQUAL (stbuf, replies[i].poststat, uid)) ||
- (!IA_EQUAL (stbuf, replies[i].poststat, gid)) ||
- (!IA_EQUAL (stbuf, replies[i].poststat, prot))) {
- start = _gf_true;
- continue;
- }
+ if (gf_uuid_compare(stbuf.ia_gfid, replies[i].poststat.ia_gfid)) {
+ start = _gf_false;
+ break;
+ }
+ if (!IA_EQUAL(stbuf, replies[i].poststat, type)) {
+ start = _gf_false;
+ break;
+ }
- /*Check if xattrs need heal*/
- if (!afr_xattrs_are_equal (replies[first].xdata,
- replies[i].xdata))
- start = _gf_true;
+ /*Check if iattrs need heal*/
+ if ((!IA_EQUAL(stbuf, replies[i].poststat, uid)) ||
+ (!IA_EQUAL(stbuf, replies[i].poststat, gid)) ||
+ (!IA_EQUAL(stbuf, replies[i].poststat, prot))) {
+ start = _gf_true;
+ continue;
}
- return start;
+ /*Check if xattrs need heal*/
+ if (!afr_xattrs_are_equal(replies[first].xdata, replies[i].xdata))
+ start = _gf_true;
+ }
+
+ return start;
}
int
-afr_lookup_metadata_heal_check (call_frame_t *frame, xlator_t *this)
+afr_lookup_metadata_heal_check(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *heal = NULL;
- int ret = 0;
+ call_frame_t *heal = NULL;
+ afr_local_t *local = NULL;
+ int ret = 0;
- if (!afr_can_start_metadata_self_heal (frame, this))
- goto out;
+ local = frame->local;
+ if (!afr_can_start_metadata_self_heal(frame, this))
+ goto out;
- heal = copy_frame (frame);
- if (heal)
- heal->root->pid = GF_CLIENT_PID_AFR_SELF_HEALD;
- ret = synctask_new (this->ctx->env, afr_lookup_sh_metadata_wrap,
- afr_refresh_selfheal_done, heal, frame);
- if(ret)
- goto out;
- return ret;
+ heal = afr_frame_create(this, &ret);
+ if (!heal) {
+ ret = -ret;
+ goto out;
+ }
+
+ ret = synctask_new(this->ctx->env, afr_lookup_sh_metadata_wrap,
+ afr_refresh_selfheal_done, heal, frame);
+ if (ret)
+ goto out;
+ return ret;
out:
- afr_lookup_done (frame, this);
- return ret;
+ if (loc_is_nameless(&local->loc))
+ afr_discover_done(frame, this);
+ else
+ afr_lookup_done(frame, this);
+ if (heal)
+ AFR_STACK_DESTROY(heal);
+ return ret;
}
int
-afr_lookup_selfheal_wrap (void *opaque)
+afr_lookup_selfheal_wrap(void *opaque)
{
- int ret = 0;
- call_frame_t *frame = opaque;
- afr_local_t *local = NULL;
- xlator_t *this = NULL;
- inode_t *inode = NULL;
+ int ret = 0;
+ call_frame_t *frame = opaque;
+ afr_local_t *local = NULL;
+ xlator_t *this = NULL;
+ inode_t *inode = NULL;
+ uuid_t pargfid = {
+ 0,
+ };
- local = frame->local;
- this = frame->this;
+ local = frame->local;
+ this = frame->this;
+ loc_pargfid(&local->loc, pargfid);
- ret = afr_selfheal_name (frame->this, local->loc.pargfid,
- local->loc.name, &local->cont.lookup.gfid_req);
- if (ret == -EIO)
- goto unwind;
+ ret = afr_selfheal_name(frame->this, pargfid, local->loc.name,
+ &local->cont.lookup.gfid_req, local->xattr_req);
+ if (ret == -EIO)
+ goto unwind;
- afr_local_replies_wipe (local, this->private);
+ afr_local_replies_wipe(local, this->private);
- inode = afr_selfheal_unlocked_lookup_on (frame, local->loc.parent,
- local->loc.name, local->replies,
- local->child_up, NULL);
- if (inode)
- inode_unref (inode);
+ inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
+ local->loc.name, local->replies,
+ local->child_up, local->xattr_req);
+ if (inode)
+ inode_unref(inode);
- afr_lookup_metadata_heal_check(frame, this);
- return 0;
+ afr_lookup_metadata_heal_check(frame, this);
+ return 0;
unwind:
- AFR_STACK_UNWIND (lookup, frame, -1, EIO, NULL, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, EIO, NULL, NULL, NULL, NULL);
+ return 0;
}
-
int
-afr_lookup_entry_heal (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- call_frame_t *heal = NULL;
- int i = 0, first = -1;
- gf_boolean_t need_heal = _gf_false;
- struct afr_reply *replies = NULL;
- int ret = 0;
-
- local = frame->local;
- replies = local->replies;
- priv = this->private;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
-
- if ((replies[i].op_ret == -1) &&
- (replies[i].op_errno == ENODATA))
- need_heal = _gf_true;
-
- if (first == -1) {
- first = i;
- continue;
- }
-
- if (replies[i].op_ret != replies[first].op_ret) {
- need_heal = _gf_true;
- break;
- }
-
- if (gf_uuid_compare (replies[i].poststat.ia_gfid,
- replies[first].poststat.ia_gfid)) {
- need_heal = _gf_true;
- break;
- }
- }
-
- if (need_heal) {
- heal = copy_frame (frame);
- if (heal)
- heal->root->pid = GF_CLIENT_PID_AFR_SELF_HEALD;
- ret = synctask_new (this->ctx->env, afr_lookup_selfheal_wrap,
- afr_refresh_selfheal_done, heal, frame);
- if (ret)
- goto metadata_heal;
- return ret;
- }
-metadata_heal:
- ret = afr_lookup_metadata_heal_check (frame, this);
-
- return ret;
-}
-
+afr_lookup_entry_heal(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ call_frame_t *heal = NULL;
+ int i = 0, first = -1;
+ gf_boolean_t name_state_mismatch = _gf_false;
+ struct afr_reply *replies = NULL;
+ int ret = 0;
+ unsigned char *par_readables = NULL;
+ unsigned char *success = NULL;
+ int32_t op_errno = 0;
+ uuid_t gfid = {0};
+
+ local = frame->local;
+ replies = local->replies;
+ priv = this->private;
+ par_readables = alloca0(priv->child_count);
+ success = alloca0(priv->child_count);
+
+ ret = afr_inode_read_subvol_get(local->loc.parent, this, par_readables,
+ NULL, NULL);
+ if (ret < 0 || AFR_COUNT(par_readables, priv->child_count) == 0) {
+ /* In this case set par_readables to all 1 so that the name-heal
+ * checks at the end of this function will flag a missing
+ * entry when the name state mismatches. */
+ memset(par_readables, 1, priv->child_count);
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+
+ if (replies[i].op_ret == 0) {
+ if (gf_uuid_is_null(gfid)) {
+ gf_uuid_copy(gfid, replies[i].poststat.ia_gfid);
+ }
+ success[i] = 1;
+ } else {
+ if ((replies[i].op_errno != ENOTCONN) &&
+ (replies[i].op_errno != ENOENT) &&
+ (replies[i].op_errno != ESTALE)) {
+ op_errno = replies[i].op_errno;
+ }
+ }
-int
-afr_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, inode_t *inode, struct iatt *buf,
- dict_t *xdata, struct iatt *postparent)
-{
- afr_local_t * local = NULL;
- int call_count = -1;
- int child_index = -1;
+ /*gfid is missing, needs heal*/
+ if ((replies[i].op_ret == -1) && (replies[i].op_errno == ENODATA)) {
+ goto name_heal;
+ }
- child_index = (long) cookie;
+ if (first == -1) {
+ first = i;
+ continue;
+ }
- local = frame->local;
+ if (replies[i].op_ret != replies[first].op_ret) {
+ name_state_mismatch = _gf_true;
+ }
- local->replies[child_index].valid = 1;
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
- /*
- * On revalidate lookup if the gfid-changed, afr should unwind the fop
- * with ESTALE so that a fresh lookup will be sent by the top xlator.
- * So remember it.
- */
- if (xdata && dict_get (xdata, "gfid-changed"))
- local->cont.lookup.needs_fresh_lookup = _gf_true;
+ if (replies[i].op_ret == 0) {
+ /* A rename after this lookup may succeed if we don't do
+ * a name-heal, and the destination may not have pending xattrs
+ * to indicate which name is good and which is bad, so always do
+ * this heal. */
+ if (gf_uuid_compare(replies[i].poststat.ia_gfid, gfid)) {
+ goto name_heal;
+ }
+ }
+ }
- if (op_ret != -1) {
- local->replies[child_index].poststat = *buf;
- local->replies[child_index].postparent = *postparent;
- if (xdata)
- local->replies[child_index].xdata = dict_ref (xdata);
- }
+ if (name_state_mismatch) {
+ if (!priv->quorum_count)
+ goto name_heal;
+ if (!afr_has_quorum(success, this, NULL))
+ goto name_heal;
+ if (op_errno)
+ goto name_heal;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+ if (par_readables[i] && replies[i].op_ret < 0 &&
+ replies[i].op_errno != ENOTCONN) {
+ goto name_heal;
+ }
+ }
+ }
+
+ goto metadata_heal;
+
+name_heal:
+ heal = afr_frame_create(this, NULL);
+ if (!heal)
+ goto metadata_heal;
+
+ ret = synctask_new(this->ctx->env, afr_lookup_selfheal_wrap,
+ afr_refresh_selfheal_done, heal, frame);
+ if (ret) {
+ AFR_STACK_DESTROY(heal);
+ goto metadata_heal;
+ }
+ return ret;
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- afr_lookup_entry_heal (frame, this);
- }
+metadata_heal:
+ ret = afr_lookup_metadata_heal_check(frame, this);
- return 0;
+ return ret;
}
+int
+afr_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
+ struct iatt *postparent)
+{
+ afr_local_t *local = NULL;
+ int call_count = -1;
+ int child_index = -1;
+ GF_UNUSED int ret = 0;
+ int8_t need_heal = 1;
+
+ child_index = (long)cookie;
+
+ local = frame->local;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ /*
+ * On a revalidate lookup, if the gfid changed, afr should unwind the
+ * fop with ESTALE so that a fresh lookup will be sent by the top
+ * xlator. So remember it.
+ */
+ if (xdata && dict_get_sizen(xdata, "gfid-changed"))
+ local->cont.lookup.needs_fresh_lookup = _gf_true;
+
+ if (xdata) {
+ ret = dict_get_int8(xdata, "link-count", &need_heal);
+ local->replies[child_index].need_heal = need_heal;
+ } else {
+ local->replies[child_index].need_heal = need_heal;
+ }
+ if (op_ret != -1) {
+ local->replies[child_index].poststat = *buf;
+ local->replies[child_index].postparent = *postparent;
+ if (xdata)
+ local->replies[child_index].xdata = dict_ref(xdata);
+ }
+
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ afr_set_need_heal(this, local);
+ afr_lookup_entry_heal(frame, this);
+ }
+ return 0;
+}
static void
-afr_discover_done (call_frame_t *frame, xlator_t *this)
+afr_discover_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = -1;
- int op_errno = 0;
- int read_subvol = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int read_subvol = -1;
+ int ret = 0;
+ unsigned char *data_readable = NULL;
+ unsigned char *success_replies = NULL;
- priv = this->private;
- local = frame->local;
+ priv = this->private;
+ local = frame->local;
+ data_readable = alloca0(priv->child_count);
+ success_replies = alloca0(priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret == 0)
- local->op_ret = 0;
- }
-
- op_errno = afr_final_errno (frame->local, this->private);
-
- if (local->op_ret < 0) {
- local->op_errno = op_errno;
- local->op_ret = -1;
- goto unwind;
- }
-
- afr_replies_interpret (frame, this, local->inode);
-
- read_subvol = afr_data_subvol_get (local->inode, this, 0, 0, NULL);
- if (read_subvol == -1) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_READ_SUBVOL_ERROR, "no read subvols for %s",
- local->loc.path);
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid ||
- local->replies[i].op_ret == -1)
- continue;
- read_subvol = i;
- break;
- }
- }
+ afr_fill_success_replies(local, priv, success_replies);
+ if (AFR_COUNT(success_replies, priv->child_count) > 0)
+ local->op_ret = 0;
-unwind:
- if (read_subvol == -1) {
- afr_inode_split_brain_choice_get (local->inode, this,
- &read_subvol);
- if (read_subvol == -1)
- read_subvol = 0;
- }
+ if (local->op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(frame->local, this->private);
+ goto error;
+ }
- AFR_STACK_UNWIND (lookup, frame, local->op_ret, local->op_errno,
- local->inode, &local->replies[read_subvol].poststat,
- local->replies[read_subvol].xdata,
- &local->replies[read_subvol].postparent);
-}
+ if (!afr_has_quorum(success_replies, this, frame))
+ goto unwind;
+ ret = afr_replies_interpret(frame, this, local->inode, NULL);
+ if (ret) {
+ afr_inode_need_refresh_set(local->inode, this);
+ }
-int
-afr_discover_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, inode_t *inode, struct iatt *buf,
- dict_t *xdata, struct iatt *postparent)
-{
- afr_local_t * local = NULL;
- int call_count = -1;
- int child_index = -1;
+ read_subvol = afr_read_subvol_decide(local->inode, this, NULL,
+ data_readable);
- child_index = (long) cookie;
+unwind:
+ afr_attempt_readsubvol_set(frame, this, success_replies, data_readable,
+ &read_subvol);
+ if (read_subvol == -1)
+ goto error;
- local = frame->local;
+ if (AFR_IS_ARBITER_BRICK(priv, read_subvol) && local->op_ret == 0) {
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ gf_msg_debug(this->name, 0,
+ "Arbiter cannot be a read subvol "
+ "for %s",
+ local->loc.path);
+ }
- local->replies[child_index].valid = 1;
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
- if (op_ret != -1) {
- local->replies[child_index].poststat = *buf;
- local->replies[child_index].postparent = *postparent;
- if (xdata)
- local->replies[child_index].xdata = dict_ref (xdata);
- }
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno,
+ local->inode, &local->replies[read_subvol].poststat,
+ local->replies[read_subvol].xdata,
+ &local->replies[read_subvol].postparent);
+ return;
- if (local->do_discovery && (op_ret == 0))
- afr_attempt_local_discovery (this, child_index);
+error:
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno, NULL, NULL,
+ NULL, NULL);
+}
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- afr_discover_done (frame, this);
- }
+static int
+afr_ta_id_file_check(void *opaque)
+{
+ afr_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ loc_t loc = {
+ 0,
+ };
+ struct iatt stbuf = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ uuid_t gfid = {
+ 0,
+ };
+ fd_t *fd = NULL;
+ int ret = 0;
+
+ this = opaque;
+ priv = this->private;
+
+ ret = afr_fill_ta_loc(this, &loc, _gf_false);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to populate thin-arbiter loc for: %s.", loc.name);
+ goto out;
+ }
+
+ ret = syncop_lookup(priv->children[THIN_ARBITER_BRICK_INDEX], &loc, &stbuf,
+ 0, 0, 0);
+ if (ret == 0) {
+ goto out;
+ } else if (ret == -ENOENT) {
+ fd = fd_create(loc.inode, getpid());
+ if (!fd)
+ goto out;
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ gf_uuid_generate(gfid);
+ ret = dict_set_gfuuid(dict, "gfid-req", gfid, true);
+ ret = syncop_create(priv->children[THIN_ARBITER_BRICK_INDEX], &loc,
+ O_RDWR, 0664, fd, &stbuf, dict, NULL);
+ }
+
+out:
+ if (ret == 0) {
+ gf_uuid_copy(priv->ta_gfid, stbuf.ia_gfid);
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to lookup/create thin-arbiter id file.");
+ }
+ if (dict)
+ dict_unref(dict);
+ if (fd)
+ fd_unref(fd);
+ loc_wipe(&loc);
- return 0;
+ return 0;
}
+static int
+afr_ta_id_file_check_cbk(int ret, call_frame_t *ta_frame, void *opaque)
+{
+ return 0;
+}
-int
-afr_discover_do (call_frame_t *frame, xlator_t *this, int err)
+static void
+afr_discover_done(call_frame_t *frame, xlator_t *this)
{
- int ret = 0;
- int i = 0;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = 0;
+ int ret = 0;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ priv = this->private;
+ if (!priv->thin_arbiter_count)
+ goto unwind;
+ if (!gf_uuid_is_null(priv->ta_gfid))
+ goto unwind;
- if (err) {
- local->op_errno = -err;
- ret = -1;
- goto out;
- }
+ ret = synctask_new(this->ctx->env, afr_ta_id_file_check,
+ afr_ta_id_file_check_cbk, NULL, this);
+ if (ret)
+ goto unwind;
+unwind:
+ afr_discover_unwind(frame, this);
+}
- call_count = local->call_count = AFR_COUNT (local->child_up,
- priv->child_count);
+int
+afr_discover_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
+ struct iatt *postparent)
+{
+ afr_local_t *local = NULL;
+ int call_count = -1;
+ int child_index = -1;
+ GF_UNUSED int ret = 0;
+ int8_t need_heal = 1;
+
+ child_index = (long)cookie;
+
+ local = frame->local;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (op_ret != -1) {
+ local->replies[child_index].poststat = *buf;
+ local->replies[child_index].postparent = *postparent;
+ if (xdata)
+ local->replies[child_index].xdata = dict_ref(xdata);
+ }
+
+ if (local->do_discovery && (op_ret == 0))
+ afr_attempt_local_discovery(this, child_index);
+
+ if (xdata) {
+ ret = dict_get_int8(xdata, "link-count", &need_heal);
+ local->replies[child_index].need_heal = need_heal;
+ } else {
+ local->replies[child_index].need_heal = need_heal;
+ }
+
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ afr_set_need_heal(this, local);
+ afr_lookup_metadata_heal_check(frame, this);
+ }
- ret = afr_lookup_xattr_req_prepare (local, this, local->xattr_req,
- &local->loc);
- if (ret) {
- local->op_errno = -ret;
- ret = -1;
- goto out;
- }
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_discover_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->lookup,
- &local->loc, local->xattr_req);
- if (!--call_count)
- break;
- }
+int
+afr_discover_do(call_frame_t *frame, xlator_t *this, int err)
+{
+ int ret = 0;
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (err) {
+ local->op_errno = err;
+ goto out;
+ }
+
+ call_count = local->call_count = AFR_COUNT(local->child_up,
+ priv->child_count);
+
+ ret = afr_lookup_xattr_req_prepare(local, this, local->xattr_req,
+ &local->loc);
+ if (ret) {
+ local->op_errno = -ret;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(
+ frame, afr_discover_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->lookup, &local->loc, local->xattr_req);
+ if (!--call_count)
+ break;
}
+ }
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
+ return 0;
}
-
int
-afr_discover (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+afr_discover(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
{
- int op_errno = ENOMEM;
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int event = 0;
+ int op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int event = 0;
- priv = this->private;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- if (!local->call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ if (!local->call_count) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
- if (__is_root_gfid (loc->inode->gfid)) {
- if (!this->itable)
- this->itable = loc->inode->table;
- if (!priv->root_inode)
- priv->root_inode = inode_ref (loc->inode);
+ if (__is_root_gfid(loc->inode->gfid)) {
+ if (!priv->root_inode)
+ priv->root_inode = inode_ref(loc->inode);
- if (priv->choose_local && !priv->did_discovery) {
- /* Logic to detect which subvolumes of AFR are
- local, in order to prefer them for reads
- */
- local->do_discovery = _gf_true;
- priv->did_discovery = _gf_true;
- }
- }
+ if (priv->choose_local && !priv->did_discovery) {
+ /* Logic to detect which subvolumes of AFR are
+ local, in order to prefer them for reads
+ */
+ local->do_discovery = _gf_true;
+ priv->did_discovery = _gf_true;
+ }
+ }
- local->op = GF_FOP_LOOKUP;
+ local->op = GF_FOP_LOOKUP;
- loc_copy (&local->loc, loc);
+ loc_copy(&local->loc, loc);
- local->inode = inode_ref (loc->inode);
+ local->inode = inode_ref(loc->inode);
- if (xattr_req)
- /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
- allocate one for us */
- local->xattr_req = dict_ref (xattr_req);
+ if (xattr_req) {
+ /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
+ allocate one for us */
+ local->xattr_req = dict_copy_with_ref(xattr_req, NULL);
+ if (!local->xattr_req) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+ }
- if (gf_uuid_is_null (loc->inode->gfid)) {
- afr_discover_do (frame, this, 0);
- return 0;
- }
+ if (gf_uuid_is_null(loc->inode->gfid)) {
+ afr_discover_do(frame, this, 0);
+ return 0;
+ }
- afr_read_subvol_get (loc->inode, this, NULL, &event,
- AFR_DATA_TRANSACTION, NULL);
+ afr_read_subvol_get(loc->inode, this, NULL, NULL, &event,
+ AFR_DATA_TRANSACTION, NULL);
- if (event != local->event_generation)
- afr_inode_refresh (frame, this, loc->inode, afr_discover_do);
- else
- afr_discover_do (frame, this, 0);
+ afr_discover_do(frame, this, 0);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ return 0;
}
-
int
-afr_lookup_do (call_frame_t *frame, xlator_t *this, int err)
-{
- int ret = 0;
- int i = 0;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = 0;
-
- local = frame->local;
- priv = this->private;
-
- if (err < 0) {
- local->op_errno = -err;
- ret = -1;
- goto out;
- }
-
- call_count = local->call_count = AFR_COUNT (local->child_up,
- priv->child_count);
-
- ret = afr_lookup_xattr_req_prepare (local, this, local->xattr_req,
- &local->loc);
- if (ret) {
- local->op_errno = -ret;
- ret = -1;
- goto out;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_lookup_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->lookup,
- &local->loc, local->xattr_req);
- if (!--call_count)
- break;
- }
+afr_lookup_do(call_frame_t *frame, xlator_t *this, int err)
+{
+ int ret = 0;
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (err < 0) {
+ local->op_errno = err;
+ goto out;
+ }
+
+ call_count = local->call_count = AFR_COUNT(local->child_up,
+ priv->child_count);
+
+ ret = afr_lookup_xattr_req_prepare(local, this, local->xattr_req,
+ &local->loc);
+ if (ret) {
+ local->op_errno = -ret;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(
+ frame, afr_lookup_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->lookup, &local->loc, local->xattr_req);
+ if (!--call_count)
+ break;
}
- return 0;
+ }
+ return 0;
out:
- AFR_STACK_UNWIND (lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
+ return 0;
}
/*
@@ -2307,2675 +4000,3879 @@ out:
*/
int
-afr_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
-{
- afr_local_t *local = NULL;
- int32_t op_errno = 0;
- int event = 0;
- void *gfid_req = NULL;
- int ret = 0;
-
- if (!loc->parent && gf_uuid_is_null (loc->pargfid)) {
- if (xattr_req)
- dict_del (xattr_req, "gfid-req");
- afr_discover (frame, this, loc, xattr_req);
- return 0;
- }
-
- if (__is_root_gfid (loc->parent->gfid)) {
- if (!strcmp (loc->name, GF_REPLICATE_TRASH_DIR)) {
- op_errno = EPERM;
- goto out;
- }
- }
-
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
-
- if (!local->call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- local->op = GF_FOP_LOOKUP;
+afr_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+{
+ afr_local_t *local = NULL;
+ int32_t op_errno = 0;
+ int event = 0;
+ int ret = 0;
- loc_copy (&local->loc, loc);
+ if (loc_is_nameless(loc)) {
+ if (xattr_req)
+ dict_del_sizen(xattr_req, "gfid-req");
+ afr_discover(frame, this, loc, xattr_req);
+ return 0;
+ }
- local->inode = inode_ref (loc->inode);
+ if (afr_is_private_directory(this->private, loc->parent->gfid, loc->name,
+ frame->root->pid)) {
+ op_errno = EPERM;
+ goto out;
+ }
- if (xattr_req) {
- /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
- allocate one for us */
- ret = dict_get_ptr (xattr_req, "gfid-req", &gfid_req);
- if (ret == 0) {
- gf_uuid_copy (local->cont.lookup.gfid_req, gfid_req);
- dict_del (xattr_req, "gfid-req");
- }
- local->xattr_req = dict_ref (xattr_req);
- }
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- afr_read_subvol_get (loc->parent, this, NULL, &event,
- AFR_DATA_TRANSACTION, NULL);
+ if (!local->call_count) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
- if (event != local->event_generation)
- afr_inode_refresh (frame, this, loc->parent, afr_lookup_do);
- else
- afr_lookup_do (frame, this, 0);
+ local->op = GF_FOP_LOOKUP;
- return 0;
-out:
- AFR_STACK_UNWIND (lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ loc_copy(&local->loc, loc);
- return 0;
-}
-
-
-/* {{{ open */
+ local->inode = inode_ref(loc->inode);
-afr_fd_ctx_t *
-__afr_fd_ctx_get (fd_t *fd, xlator_t *this)
-{
- uint64_t ctx = 0;
- int ret = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
-
- ret = __fd_ctx_get (fd, this, &ctx);
+ if (xattr_req) {
+ /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
+ allocate one for us */
+ local->xattr_req = dict_copy_with_ref(xattr_req, NULL);
+ if (!local->xattr_req) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+ ret = dict_get_gfuuid(local->xattr_req, "gfid-req",
+ &local->cont.lookup.gfid_req);
+ if (ret == 0) {
+ dict_del_sizen(local->xattr_req, "gfid-req");
+ }
+ }
- if (ret < 0) {
- ret = __afr_fd_ctx_set (this, fd);
- if (ret < 0)
- goto out;
+ afr_read_subvol_get(loc->parent, this, NULL, NULL, &event,
+ AFR_DATA_TRANSACTION, NULL);
- ret = __fd_ctx_get (fd, this, &ctx);
- if (ret < 0)
- goto out;
- }
+ afr_lookup_do(frame, this, 0);
- fd_ctx = (afr_fd_ctx_t *)(long) ctx;
+ return 0;
out:
- return fd_ctx;
-}
+ AFR_STACK_UNWIND(lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ return 0;
+}
-afr_fd_ctx_t *
-afr_fd_ctx_get (fd_t *fd, xlator_t *this)
+void
+_afr_cleanup_fd_ctx(xlator_t *this, afr_fd_ctx_t *fd_ctx)
{
- afr_fd_ctx_t *fd_ctx = NULL;
+ afr_private_t *priv = this->private;
- LOCK(&fd->lock);
+ if (fd_ctx->lk_heal_info) {
+ LOCK(&priv->lock);
{
- fd_ctx = __afr_fd_ctx_get (fd, this);
+ list_del(&fd_ctx->lk_heal_info->pos);
}
- UNLOCK(&fd->lock);
-
- return fd_ctx;
+ afr_lk_heal_info_cleanup(fd_ctx->lk_heal_info);
+ fd_ctx->lk_heal_info = NULL;
+ }
+ GF_FREE(fd_ctx->opened_on);
+ GF_FREE(fd_ctx);
+ return;
}
-
int
-__afr_fd_ctx_set (xlator_t *this, fd_t *fd)
+afr_cleanup_fd_ctx(xlator_t *this, fd_t *fd)
{
- afr_private_t * priv = NULL;
- int ret = -1;
- uint64_t ctx = 0;
- afr_fd_ctx_t * fd_ctx = NULL;
- int i = 0;
+ uint64_t ctx = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int ret = 0;
- VALIDATE_OR_GOTO (this->private, out);
- VALIDATE_OR_GOTO (fd, out);
+ ret = fd_ctx_get(fd, this, &ctx);
+ if (ret < 0)
+ goto out;
- priv = this->private;
+ fd_ctx = (afr_fd_ctx_t *)(long)ctx;
- ret = __fd_ctx_get (fd, this, &ctx);
+ if (fd_ctx) {
+ _afr_cleanup_fd_ctx(this, fd_ctx);
+ }
- if (ret == 0)
- goto out;
-
- fd_ctx = GF_CALLOC (1, sizeof (afr_fd_ctx_t),
- gf_afr_mt_afr_fd_ctx_t);
- if (!fd_ctx) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++) {
- fd_ctx->pre_op_done[i] = GF_CALLOC (sizeof (*fd_ctx->pre_op_done[i]),
- priv->child_count,
- gf_afr_mt_int32_t);
- if (!fd_ctx->pre_op_done[i]) {
- ret = -ENOMEM;
- goto out;
- }
- }
-
- fd_ctx->opened_on = GF_CALLOC (sizeof (*fd_ctx->opened_on),
- priv->child_count,
- gf_afr_mt_int32_t);
- if (!fd_ctx->opened_on) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (fd_is_anonymous (fd))
- fd_ctx->opened_on[i] = AFR_FD_OPENED;
- else
- fd_ctx->opened_on[i] = AFR_FD_NOT_OPENED;
- }
-
- fd_ctx->lock_piggyback = GF_CALLOC (sizeof (*fd_ctx->lock_piggyback),
- priv->child_count,
- gf_afr_mt_char);
- if (!fd_ctx->lock_piggyback) {
- ret = -ENOMEM;
- goto out;
- }
-
- fd_ctx->lock_acquired = GF_CALLOC (sizeof (*fd_ctx->lock_acquired),
- priv->child_count,
- gf_afr_mt_char);
- if (!fd_ctx->lock_acquired) {
- ret = -ENOMEM;
- goto out;
- }
-
- fd_ctx->readdir_subvol = -1;
-
- pthread_mutex_init (&fd_ctx->delay_lock, NULL);
-
- INIT_LIST_HEAD (&fd_ctx->eager_locked);
-
- ret = __fd_ctx_set (fd, this, (uint64_t)(long) fd_ctx);
- if (ret)
- gf_msg_debug (this->name, 0,
- "failed to set fd ctx (%p)", fd);
out:
- return ret;
+ return 0;
}
-
int
-afr_fd_ctx_set (xlator_t *this, fd_t *fd)
+afr_release(xlator_t *this, fd_t *fd)
{
- int ret = -1;
+ afr_cleanup_fd_ctx(this, fd);
- LOCK (&fd->lock);
- {
- ret = __afr_fd_ctx_set (this, fd);
- }
- UNLOCK (&fd->lock);
-
- return ret;
+ return 0;
}
-/* {{{ flush */
-
-int
-afr_flush_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_fd_ctx_t *
+__afr_fd_ctx_get(fd_t *fd, xlator_t *this)
{
- afr_local_t *local = NULL;
- int call_count = -1;
+ uint64_t ctx = 0;
+ int ret = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
- local = frame->local;
-
- LOCK (&frame->lock);
- {
- if (op_ret != -1) {
- local->op_ret = op_ret;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- } else {
- local->op_errno = op_errno;
- }
- }
- UNLOCK (&frame->lock);
+ ret = __fd_ctx_get(fd, this, &ctx);
- call_count = afr_frame_return (frame);
+ if (ret < 0) {
+ ret = __afr_fd_ctx_set(this, fd);
+ if (ret < 0)
+ goto out;
- if (call_count == 0)
- AFR_STACK_UNWIND (flush, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
+ ret = __fd_ctx_get(fd, this, &ctx);
+ if (ret < 0)
+ goto out;
+ }
- return 0;
+ fd_ctx = (afr_fd_ctx_t *)(long)ctx;
+out:
+ return fd_ctx;
}
-static int
-afr_flush_wrapper (call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+afr_fd_ctx_t *
+afr_fd_ctx_get(fd_t *fd, xlator_t *this)
{
- int i = 0;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = -1;
-
- priv = this->private;
- local = frame->local;
- call_count = local->call_count;
+ afr_fd_ctx_t *fd_ctx = NULL;
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_flush_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->flush,
- local->fd, xdata);
- if (!--call_count)
- break;
+ LOCK(&fd->lock);
+ {
+ fd_ctx = __afr_fd_ctx_get(fd, this);
+ }
+ UNLOCK(&fd->lock);
- }
- }
-
- return 0;
+ return fd_ctx;
}
int
-afr_flush (call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+__afr_fd_ctx_set(xlator_t *this, fd_t *fd)
{
- afr_local_t *local = NULL;
- call_stub_t *stub = NULL;
- int op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ int ret = -1;
+ uint64_t ctx = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int i = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ VALIDATE_OR_GOTO(this->private, out);
+ VALIDATE_OR_GOTO(fd, out);
- if (!local->call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ priv = this->private;
- local->fd = fd_ref(fd);
+ ret = __fd_ctx_get(fd, this, &ctx);
- stub = fop_flush_stub (frame, afr_flush_wrapper, fd, xdata);
- if (!stub)
- goto out;
+ if (ret == 0)
+ goto out;
- afr_delayed_changelog_wake_resume (this, fd, stub);
+ fd_ctx = GF_CALLOC(1, sizeof(afr_fd_ctx_t), gf_afr_mt_afr_fd_ctx_t);
+ if (!fd_ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
- return 0;
+ fd_ctx->opened_on = GF_CALLOC(sizeof(*fd_ctx->opened_on), priv->child_count,
+ gf_afr_mt_int32_t);
+ if (!fd_ctx->opened_on) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (fd_is_anonymous(fd))
+ fd_ctx->opened_on[i] = AFR_FD_OPENED;
+ else
+ fd_ctx->opened_on[i] = AFR_FD_NOT_OPENED;
+ }
+
+ fd_ctx->readdir_subvol = -1;
+ fd_ctx->lk_heal_info = NULL;
+
+ ret = __fd_ctx_set(fd, this, (uint64_t)(long)fd_ctx);
+ if (ret)
+ gf_msg_debug(this->name, 0, "failed to set fd ctx (%p)", fd);
out:
- AFR_STACK_UNWIND (flush, frame, -1, op_errno, NULL);
- return 0;
+ if (ret && fd_ctx)
+ _afr_cleanup_fd_ctx(this, fd_ctx);
+ return ret;
}
-/* }}} */
-
+/* {{{ flush */
int
-afr_cleanup_fd_ctx (xlator_t *this, fd_t *fd)
+afr_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
{
- uint64_t ctx = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
- int ret = 0;
- int i = 0;
+ afr_local_t *local = NULL;
+ int call_count = -1;
- ret = fd_ctx_get (fd, this, &ctx);
- if (ret < 0)
- goto out;
+ local = frame->local;
- fd_ctx = (afr_fd_ctx_t *)(long) ctx;
-
- if (fd_ctx) {
- //no need to take any locks
- if (!list_empty (&fd_ctx->eager_locked))
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_INVALID_DATA, "%s: Stale "
- "Eager-lock stubs found",
- uuid_utoa (fd->inode->gfid));
-
- for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++)
- GF_FREE (fd_ctx->pre_op_done[i]);
-
- GF_FREE (fd_ctx->opened_on);
-
- GF_FREE (fd_ctx->lock_piggyback);
+ LOCK(&frame->lock);
+ {
+ if (op_ret != -1) {
+ local->op_ret = op_ret;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
+ } else {
+ local->op_errno = op_errno;
+ }
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
- GF_FREE (fd_ctx->lock_acquired);
+ if (call_count == 0)
+ AFR_STACK_UNWIND(flush, frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
- pthread_mutex_destroy (&fd_ctx->delay_lock);
+ return 0;
+}
- GF_FREE (fd_ctx);
+static int
+afr_flush_wrapper(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = -1;
+
+ priv = this->private;
+ local = frame->local;
+ call_count = local->call_count;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(frame, afr_flush_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->flush,
+ local->fd, xdata);
+ if (!--call_count)
+ break;
}
+ }
-out:
- return 0;
+ return 0;
}
-
-int
-afr_release (xlator_t *this, fd_t *fd)
+afr_local_t *
+afr_wakeup_same_fd_delayed_op(xlator_t *this, afr_lock_t *lock, fd_t *fd)
{
- afr_cleanup_fd_ctx (this, fd);
-
- return 0;
-}
+ afr_local_t *local = NULL;
+ if (lock->delay_timer) {
+ local = list_entry(lock->post_op.next, afr_local_t,
+ transaction.owner_list);
+ if (fd == local->fd) {
+ if (gf_timer_call_cancel(this->ctx, lock->delay_timer)) {
+ local = NULL;
+ } else {
+ lock->delay_timer = NULL;
+ }
+ } else {
+ local = NULL;
+ }
+ }
-/* {{{ fsync */
+ return local;
+}
-int
-afr_fsync_unwind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
-{
- AFR_STACK_UNWIND (fsync, frame, op_ret, op_errno, prebuf, postbuf,
- xdata);
- return 0;
+void
+afr_delayed_changelog_wake_resume(xlator_t *this, inode_t *inode,
+ call_stub_t *stub)
+{
+ afr_inode_ctx_t *ctx = NULL;
+ afr_lock_t *lock = NULL;
+ afr_local_t *metadata_local = NULL;
+ afr_local_t *data_local = NULL;
+ LOCK(&inode->lock);
+ {
+ (void)__afr_inode_ctx_get(this, inode, &ctx);
+ lock = &ctx->lock[AFR_DATA_TRANSACTION];
+ data_local = afr_wakeup_same_fd_delayed_op(this, lock, stub->args.fd);
+ lock = &ctx->lock[AFR_METADATA_TRANSACTION];
+ metadata_local = afr_wakeup_same_fd_delayed_op(this, lock,
+ stub->args.fd);
+ }
+ UNLOCK(&inode->lock);
+
+ if (data_local) {
+ data_local->transaction.resume_stub = stub;
+ } else if (metadata_local) {
+ metadata_local->transaction.resume_stub = stub;
+ } else {
+ call_resume(stub);
+ }
+ if (data_local) {
+ afr_delayed_changelog_wake_up_cbk(data_local);
+ }
+ if (metadata_local) {
+ afr_delayed_changelog_wake_up_cbk(metadata_local);
+ }
}
int
-afr_fsync_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
+afr_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_count = -1;
- int child_index = (long) cookie;
- int read_subvol = 0;
- call_stub_t *stub = NULL;
+ afr_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ int op_errno = ENOMEM;
- local = frame->local;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- read_subvol = afr_data_subvol_get (local->inode, this, 0, 0, NULL);
+ local->op = GF_FOP_FLUSH;
+ if (!afr_is_consistent_io_possible(local, this->private, &op_errno))
+ goto out;
- LOCK (&frame->lock);
- {
- if (op_ret == 0) {
- if (local->op_ret == -1) {
- local->op_ret = 0;
-
- local->cont.inode_wfop.prebuf = *prebuf;
- local->cont.inode_wfop.postbuf = *postbuf;
-
- if (xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
-
- if (child_index == read_subvol) {
- local->cont.inode_wfop.prebuf = *prebuf;
- local->cont.inode_wfop.postbuf = *postbuf;
- if (xdata) {
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
- local->xdata_rsp = dict_ref (xdata);
- }
- }
- } else {
- local->op_errno = op_errno;
- }
- }
- UNLOCK (&frame->lock);
-
- call_count = afr_frame_return (frame);
-
- if (call_count == 0) {
- /* Make a stub out of the frame, and register it
- with the waking up post-op. When the call-stub resumes,
- we are guaranteed that there was no post-op pending
- (i.e changelogs were unset in the server). This is an
- essential "guarantee", that fsync() returns only after
- completely finishing EVERYTHING, including the delayed
- post-op. This guarantee is expected by FUSE graph switching
- for example.
- */
- stub = fop_fsync_cbk_stub (frame, afr_fsync_unwind_cbk,
- local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf,
- local->xdata_rsp);
- if (!stub) {
- AFR_STACK_UNWIND (fsync, frame, -1, ENOMEM, 0, 0, 0);
- return 0;
- }
-
- /* If no new unstable writes happened between the
- time we cleared the unstable write witness flag in afr_fsync
- and now, calling afr_delayed_changelog_wake_up() should
- wake up and skip over the fsync phase and go straight to
- afr_changelog_post_op_now()
- */
- afr_delayed_changelog_wake_resume (this, local->fd, stub);
- }
+ local->fd = fd_ref(fd);
- return 0;
-}
+ stub = fop_flush_stub(frame, afr_flush_wrapper, fd, xdata);
+ if (!stub)
+ goto out;
+
+ afr_delayed_changelog_wake_resume(this, fd->inode, stub);
+ return 0;
+out:
+ AFR_STACK_UNWIND(flush, frame, -1, op_errno, NULL);
+ return 0;
+}
int
-afr_fsync (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
- dict_t *xdata)
+afr_fsyncdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ int call_count = -1;
- priv = this->private;
+ local = frame->local;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
-
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- local->fd = fd_ref (fd);
+ LOCK(&frame->lock);
+ {
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
+ } else {
+ local->op_errno = op_errno;
+ }
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
- if (afr_fd_has_witnessed_unstable_write (this, fd)) {
- /* don't care. we only wanted to CLEAR the bit */
- }
+ if (call_count == 0)
+ AFR_STACK_UNWIND(fsyncdir, frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
- local->inode = inode_ref (fd->inode);
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_fsync_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->fsync,
- fd, datasync, xdata);
- if (!--call_count)
- break;
- }
+int
+afr_fsyncdir(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+ int32_t call_count = 0;
+ int32_t op_errno = ENOMEM;
+
+ priv = this->private;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_FSYNCDIR;
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
+
+ call_count = local->call_count;
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND(frame, afr_fsyncdir_cbk, priv->children[i],
+ priv->children[i]->fops->fsyncdir, fd, datasync, xdata);
+ if (!--call_count)
+ break;
}
+ }
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (fsync, frame, -1, op_errno, NULL, NULL, NULL);
+ AFR_STACK_UNWIND(fsyncdir, frame, -1, op_errno, NULL);
- return 0;
+ return 0;
}
/* }}} */
-/* {{{ fsync */
+static int
+afr_serialized_lock_wind(call_frame_t *frame, xlator_t *this);
-int
-afr_fsyncdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+static gf_boolean_t
+afr_is_conflicting_lock_present(int32_t op_ret, int32_t op_errno)
{
- afr_local_t *local = NULL;
- int call_count = -1;
+ if (op_ret == -1 && op_errno == EAGAIN)
+ return _gf_true;
+ return _gf_false;
+}
- local = frame->local;
+static void
+afr_fop_lock_unwind(call_frame_t *frame, glusterfs_fop_t op, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ switch (op) {
+ case GF_FOP_INODELK:
+ AFR_STACK_UNWIND(inodelk, frame, op_ret, op_errno, xdata);
+ break;
+ case GF_FOP_FINODELK:
+ AFR_STACK_UNWIND(finodelk, frame, op_ret, op_errno, xdata);
+ break;
+ case GF_FOP_ENTRYLK:
+ AFR_STACK_UNWIND(entrylk, frame, op_ret, op_errno, xdata);
+ break;
+ case GF_FOP_FENTRYLK:
+ AFR_STACK_UNWIND(fentrylk, frame, op_ret, op_errno, xdata);
+ break;
+ default:
+ break;
+ }
+}
- LOCK (&frame->lock);
- {
- if (op_ret == 0) {
- local->op_ret = 0;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- } else {
- local->op_errno = op_errno;
- }
- }
- UNLOCK (&frame->lock);
+static void
+afr_fop_lock_wind(call_frame_t *frame, xlator_t *this, int child_index,
+ int32_t (*lock_cbk)(call_frame_t *, void *, xlator_t *,
+ int32_t, int32_t, dict_t *))
+{
+ afr_local_t *local = frame->local;
+ afr_private_t *priv = this->private;
+ int i = child_index;
+
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->inodelk,
+ (const char *)local->cont.inodelk.volume, &local->loc,
+ local->cont.inodelk.cmd, &local->cont.inodelk.flock,
+ local->cont.inodelk.xdata);
+ break;
+ case GF_FOP_FINODELK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->finodelk,
+ (const char *)local->cont.inodelk.volume, local->fd,
+ local->cont.inodelk.cmd, &local->cont.inodelk.flock,
+ local->cont.inodelk.xdata);
+ break;
+ case GF_FOP_ENTRYLK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->entrylk, local->cont.entrylk.volume,
+ &local->loc, local->cont.entrylk.basename,
+ local->cont.entrylk.cmd, local->cont.entrylk.type,
+ local->cont.entrylk.xdata);
+ break;
+ case GF_FOP_FENTRYLK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->fentrylk, local->cont.entrylk.volume,
+ local->fd, local->cont.entrylk.basename,
+ local->cont.entrylk.cmd, local->cont.entrylk.type,
+ local->cont.entrylk.xdata);
+ break;
+ default:
+ break;
+ }
+}
- call_count = afr_frame_return (frame);
+void
+afr_fop_lock_proceed(call_frame_t *frame)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- if (call_count == 0)
- AFR_STACK_UNWIND (fsyncdir, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
+ local = frame->local;
+ priv = frame->this->private;
- return 0;
+ if (local->fop_lock_state != AFR_FOP_LOCK_PARALLEL) {
+ afr_fop_lock_unwind(frame, local->op, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return;
+ }
+ /* At least one child is up */
+ /*
+ * Non-blocking locks also need to be serialized. Otherwise there is
+ * a chance that both mounts which issued the same non-blocking inodelk
+ * may end up not acquiring the lock on any brick.
+ * Ex: Mount1 and Mount2 request a full-length lock on file f1.
+ * Mount1's afr may acquire the partial lock on brick-1 but not on
+ * brick-2 because Mount2 already got the lock on brick-2, and vice
+ * versa. Since both mounts only got partial locks, afr treats this as
+ * a failure to gain the locks and unwinds with EAGAIN errno.
+ */
+ local->op_ret = -1;
+ local->op_errno = EUCLEAN;
+ local->fop_lock_state = AFR_FOP_LOCK_SERIAL;
+ afr_local_replies_wipe(local, priv);
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = NULL;
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.cmd = local->cont.inodelk.in_cmd;
+ local->cont.inodelk.flock = local->cont.inodelk.in_flock;
+ if (local->cont.inodelk.xdata)
+ dict_unref(local->cont.inodelk.xdata);
+ local->cont.inodelk.xdata = NULL;
+ if (local->xdata_req)
+ local->cont.inodelk.xdata = dict_ref(local->xdata_req);
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.cmd = local->cont.entrylk.in_cmd;
+ if (local->cont.entrylk.xdata)
+ dict_unref(local->cont.entrylk.xdata);
+ local->cont.entrylk.xdata = NULL;
+ if (local->xdata_req)
+ local->cont.entrylk.xdata = dict_ref(local->xdata_req);
+ break;
+ default:
+ break;
+ }
+ afr_serialized_lock_wind(frame, frame->this);
}
+static int32_t
+afr_unlock_partial_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
-int
-afr_fsyncdir (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
- dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = -1;
+ int child_index = (long)cookie;
+ uuid_t gfid = {0};
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ if (op_ret < 0 && op_errno != ENOTCONN) {
+ if (local->fd)
+ gf_uuid_copy(gfid, local->fd->inode->gfid);
+ else
+ loc_gfid(&local->loc, gfid);
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_UNLOCK_FAIL,
+ "%s: Failed to unlock %s on %s "
+ "with lk_owner: %s",
+ uuid_utoa(gfid), gf_fop_list[local->op],
+ priv->children[child_index]->name,
+ lkowner_utoa(&frame->root->lk_owner));
+ }
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ call_count = afr_frame_return(frame);
+ if (call_count == 0)
+ afr_fop_lock_proceed(frame);
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_fsyncdir_cbk,
- priv->children[i],
- priv->children[i]->fops->fsyncdir,
- fd, datasync, xdata);
- if (!--call_count)
- break;
- }
- }
+ return 0;
+}
- return 0;
-out:
- AFR_STACK_UNWIND (fsyncdir, frame, -1, op_errno, NULL);
+static int32_t
+afr_unlock_locks_and_proceed(call_frame_t *frame, xlator_t *this,
+ int call_count)
+{
+ int i = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+
+ if (call_count == 0) {
+ afr_fop_lock_proceed(frame);
+ goto out;
+ }
+
+ local = frame->local;
+ priv = this->private;
+ local->call_count = call_count;
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.flock.l_type = F_UNLCK;
+ local->cont.inodelk.cmd = F_SETLK;
+ if (local->cont.inodelk.xdata)
+ dict_unref(local->cont.inodelk.xdata);
+ local->cont.inodelk.xdata = NULL;
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.cmd = ENTRYLK_UNLOCK;
+ if (local->cont.entrylk.xdata)
+ dict_unref(local->cont.entrylk.xdata);
+ local->cont.entrylk.xdata = NULL;
+ break;
+ default:
+ break;
+ }
- return 0;
-}
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
-/* }}} */
+ if (local->replies[i].op_ret == -1)
+ continue;
-/* {{{ xattrop */
+ afr_fop_lock_wind(frame, this, i, afr_unlock_partial_lock_cbk);
+
+ if (!--call_count)
+ break;
+ }
+
+out:
+ return 0;
+}
int32_t
-afr_xattrop_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *xattr, dict_t *xdata)
+afr_fop_lock_done(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- int call_count = -1;
-
- local = frame->local;
+ int i = 0;
+ int lock_count = 0;
+ unsigned char *success = NULL;
- LOCK (&frame->lock);
- {
- if (op_ret == 0) {
- if (!local->cont.xattrop.xattr)
- local->cont.xattrop.xattr = dict_ref (xattr);
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
+ local = frame->local;
+ priv = this->private;
+ success = alloca0(priv->child_count);
- local->op_ret = 0;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
- local->op_errno = op_errno;
+ if (local->replies[i].op_ret == 0) {
+ lock_count++;
+ success[i] = 1;
}
- UNLOCK (&frame->lock);
- call_count = afr_frame_return (frame);
+ if (local->op_ret == -1 && local->op_errno == EAGAIN)
+ continue;
- if (call_count == 0)
- AFR_STACK_UNWIND (xattrop, frame, local->op_ret, local->op_errno,
- local->cont.xattrop.xattr, local->xdata_rsp);
+ if ((local->replies[i].op_ret == -1) &&
+ (local->replies[i].op_errno == EAGAIN)) {
+ local->op_ret = -1;
+ local->op_errno = EAGAIN;
+ continue;
+ }
- return 0;
-}
+ if (local->replies[i].op_ret == 0)
+ local->op_ret = 0;
+ local->op_errno = local->replies[i].op_errno;
+ }
-int32_t
-afr_xattrop (call_frame_t *frame, xlator_t *this, loc_t *loc,
- gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
+ if (afr_fop_lock_is_unlock(frame))
+ goto unwind;
- priv = this->private;
+ if (afr_is_conflicting_lock_present(local->op_ret, local->op_errno)) {
+ afr_unlock_locks_and_proceed(frame, this, lock_count);
+ } else if (priv->quorum_count && !afr_has_quorum(success, this, NULL)) {
+ local->fop_lock_state = AFR_FOP_LOCK_QUORUM_FAILED;
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
+ if (local->op_errno == 0)
+ local->op_errno = afr_quorum_errno(priv);
+ afr_unlock_locks_and_proceed(frame, this, lock_count);
+ } else {
+ goto unwind;
+ }
+
+ return 0;
+unwind:
+ afr_fop_lock_unwind(frame, local->op, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return 0;
+}
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+static int
+afr_common_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int child_index = (long)cookie;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ local = frame->local;
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_xattrop_cbk,
- priv->children[i],
- priv->children[i]->fops->xattrop,
- loc, optype, xattr, xdata);
- if (!--call_count)
- break;
- }
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (op_ret == 0 && xdata) {
+ local->replies[child_index].xdata = dict_ref(xdata);
+ LOCK(&frame->lock);
+ {
+ if (!local->xdata_rsp)
+ local->xdata_rsp = dict_ref(xdata);
}
-
- return 0;
-out:
- AFR_STACK_UNWIND (xattrop, frame, -1, op_errno, NULL, NULL);
-
- return 0;
+ UNLOCK(&frame->lock);
+ }
+ return 0;
}
-/* }}} */
-
-/* {{{ fxattrop */
+static int32_t
+afr_serialized_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
-int32_t
-afr_fxattrop_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *xattr, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int child_index = (long)cookie;
+ int next_child = 0;
- int call_count = -1;
+ local = frame->local;
+ priv = this->private;
- local = frame->local;
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
- LOCK (&frame->lock);
- {
- if (op_ret == 0) {
- if (!local->cont.fxattrop.xattr)
- local->cont.fxattrop.xattr = dict_ref (xattr);
+ for (next_child = child_index + 1; next_child < priv->child_count;
+ next_child++) {
+ if (local->child_up[next_child])
+ break;
+ }
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- local->op_ret = 0;
- }
+ if (afr_is_conflicting_lock_present(op_ret, op_errno) ||
+ (next_child == priv->child_count)) {
+ afr_fop_lock_done(frame, this);
+ } else {
+ afr_fop_lock_wind(frame, this, next_child, afr_serialized_lock_cbk);
+ }
- local->op_errno = op_errno;
- }
- UNLOCK (&frame->lock);
+ return 0;
+}
- call_count = afr_frame_return (frame);
+static int
+afr_serialized_lock_wind(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
- if (call_count == 0)
- AFR_STACK_UNWIND (fxattrop, frame, local->op_ret, local->op_errno,
- local->cont.fxattrop.xattr, local->xdata_rsp);
+ priv = this->private;
+ local = frame->local;
- return 0;
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ afr_fop_lock_wind(frame, this, i, afr_serialized_lock_cbk);
+ break;
+ }
+ }
+ return 0;
}
+static int32_t
+afr_parallel_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
-int32_t
-afr_fxattrop (call_frame_t *frame, xlator_t *this, fd_t *fd,
- gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = 0;
+ int call_count = 0;
- priv = this->private;
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ call_count = afr_frame_return(frame);
+ if (call_count == 0)
+ afr_fop_lock_done(frame, this);
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_fxattrop_cbk,
- priv->children[i],
- priv->children[i]->fops->fxattrop,
- fd, optype, xattr, xdata);
- if (!--call_count)
- break;
- }
- }
+static int
+afr_parallel_lock_wind(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int call_count = 0;
+ int i = 0;
- return 0;
-out:
- AFR_STACK_UNWIND (fxattrop, frame, -1, op_errno, NULL, NULL);
+ priv = this->private;
+ local = frame->local;
+ call_count = local->call_count;
- return 0;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->child_up[i])
+ continue;
+ afr_fop_lock_wind(frame, this, i, afr_parallel_lock_cbk);
+ if (!--call_count)
+ break;
+ }
+ return 0;
}
-/* }}} */
-
-int32_t
-afr_unlock_partial_inodelk_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret,
- int32_t op_errno, dict_t *xdata)
-
+static int
+afr_fop_handle_lock(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = -1;
- int child_index = (long)cookie;
- uuid_t gfid = {0};
+ afr_local_t *local = frame->local;
+ int op_errno = 0;
- local = frame->local;
- priv = this->private;
+ if (!afr_fop_lock_is_unlock(frame)) {
+ if (!afr_is_consistent_io_possible(local, this->private, &op_errno))
+ goto out;
- if (op_ret < 0 && op_errno != ENOTCONN) {
- loc_gfid (&local->loc, gfid);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INODE_UNLOCK_FAIL,
- "%s: Failed to unlock %s "
- "with lk_owner: %s (%s)", uuid_utoa (gfid),
- priv->children[child_index]->name,
- lkowner_utoa (&frame->root->lk_owner),
- strerror (op_errno));
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.cmd = F_SETLK;
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.cmd = ENTRYLK_LOCK_NB;
+ break;
+ default:
+ break;
}
+ }
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- AFR_STACK_UNWIND (inodelk, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
+ if (local->xdata_req) {
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.xdata = dict_ref(local->xdata_req);
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.xdata = dict_ref(local->xdata_req);
+ break;
+ default:
+ break;
}
+ }
- return 0;
+ local->fop_lock_state = AFR_FOP_LOCK_PARALLEL;
+ afr_parallel_lock_wind(frame, this);
+out:
+ return -op_errno;
}
-int32_t
-afr_unlock_inodelks_and_unwind (call_frame_t *frame, xlator_t *this,
- int call_count)
-{
- int i = 0;
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
-
- local = frame->local;
- priv = this->private;
- local->call_count = call_count;
- local->cont.inodelk.flock.l_type = F_UNLCK;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
-
- if (local->replies[i].op_ret == -1)
- continue;
-
- STACK_WIND_COOKIE (frame, afr_unlock_partial_inodelk_cbk,
- (void*) (long) i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock, 0);
-
- if (!--call_count)
- break;
- }
+static int32_t
+afr_handle_inodelk(call_frame_t *frame, xlator_t *this, glusterfs_fop_t fop,
+ const char *volume, loc_t *loc, fd_t *fd, int32_t cmd,
+ struct gf_flock *flock, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int32_t op_errno = ENOMEM;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = fop;
+ if (loc)
+ loc_copy(&local->loc, loc);
+ if (fd && (flock->l_type != F_UNLCK)) {
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local->fd = fd_ref(fd);
+ }
+
+ local->cont.inodelk.volume = gf_strdup(volume);
+ if (!local->cont.inodelk.volume) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->cont.inodelk.in_cmd = cmd;
+ local->cont.inodelk.cmd = cmd;
+ local->cont.inodelk.in_flock = *flock;
+ local->cont.inodelk.flock = *flock;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+
+ op_errno = -afr_fop_handle_lock(frame, frame->this);
+ if (op_errno)
+ goto out;
+ return 0;
+out:
+ afr_fop_lock_unwind(frame, fop, -1, op_errno, NULL);
- return 0;
+ return 0;
}
int32_t
-afr_inodelk_done (call_frame_t *frame, xlator_t *this)
+afr_inodelk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
{
- int i = 0;
- int lock_count = 0;
-
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_handle_inodelk(frame, this, GF_FOP_INODELK, volume, loc, NULL, cmd,
+ flock, xdata);
+ return 0;
+}
- local = frame->local;
- priv = this->private;
+int32_t
+afr_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+{
+ afr_handle_inodelk(frame, this, GF_FOP_FINODELK, volume, NULL, fd, cmd,
+ flock, xdata);
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
+static int
+afr_handle_entrylk(call_frame_t *frame, xlator_t *this, glusterfs_fop_t fop,
+ const char *volume, loc_t *loc, fd_t *fd,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int32_t op_errno = ENOMEM;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = fop;
+ if (loc)
+ loc_copy(&local->loc, loc);
+ if (fd && (cmd != ENTRYLK_UNLOCK)) {
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local->fd = fd_ref(fd);
+ }
+ local->cont.entrylk.cmd = cmd;
+ local->cont.entrylk.in_cmd = cmd;
+ local->cont.entrylk.type = type;
+ local->cont.entrylk.volume = gf_strdup(volume);
+ local->cont.entrylk.basename = gf_strdup(basename);
+ if (!local->cont.entrylk.volume || !local->cont.entrylk.basename) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+ op_errno = -afr_fop_handle_lock(frame, frame->this);
+ if (op_errno)
+ goto out;
+
+ return 0;
+out:
+ afr_fop_lock_unwind(frame, fop, -1, op_errno, NULL);
+ return 0;
+}
- if (local->replies[i].op_ret == 0)
- lock_count++;
+int
+afr_entrylk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
+{
+ afr_handle_entrylk(frame, this, GF_FOP_ENTRYLK, volume, loc, NULL, basename,
+ cmd, type, xdata);
+ return 0;
+}
- if (local->op_ret == -1 && local->op_errno == EAGAIN)
- continue;
+int
+afr_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
+{
+ afr_handle_entrylk(frame, this, GF_FOP_FENTRYLK, volume, NULL, fd, basename,
+ cmd, type, xdata);
+ return 0;
+}
- if ((local->replies[i].op_ret == -1) &&
- (local->replies[i].op_errno == EAGAIN)) {
- local->op_ret = -1;
- local->op_errno = EAGAIN;
- continue;
- }
+int
+afr_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct statvfs *statvfs, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int call_count = 0;
+ struct statvfs *buf = NULL;
- if (local->replies[i].op_ret == 0)
- local->op_ret = 0;
+ local = frame->local;
- local->op_errno = local->replies[i].op_errno;
+ LOCK(&frame->lock);
+ {
+ if (op_ret != 0) {
+ local->op_errno = op_errno;
+ goto unlock;
}
- if (lock_count && local->cont.inodelk.flock.l_type != F_UNLCK &&
- (local->op_ret == -1 && local->op_errno == EAGAIN)) {
- afr_unlock_inodelks_and_unwind (frame, this,
- lock_count);
+ local->op_ret = op_ret;
+
+ buf = &local->cont.statfs.buf;
+ if (local->cont.statfs.buf_set) {
+ if (statvfs->f_bavail < buf->f_bavail) {
+ *buf = *statvfs;
+ if (xdata) {
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = dict_ref(xdata);
+ }
+ }
} else {
- AFR_STACK_UNWIND (inodelk, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
+ *buf = *statvfs;
+ local->cont.statfs.buf_set = 1;
+ if (xdata)
+ local->xdata_rsp = dict_ref(xdata);
}
+ }
+unlock:
+ call_count = --local->call_count;
+ UNLOCK(&frame->lock);
- return 0;
+ if (call_count == 0)
+ AFR_STACK_UNWIND(statfs, frame, local->op_ret, local->op_errno,
+ &local->cont.statfs.buf, local->xdata_rsp);
+
+ return 0;
}
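
The statfs aggregation above always keeps the reply with the smallest f_bavail, so the mount never reports more free space than its fullest brick actually has. A self-contained sketch of that reduction rule, using plain POSIX struct statvfs rather than the AFR frame/local structures (aggregate_statfs and the sample values are made up for illustration), might look like this:

#include <stdio.h>
#include <sys/statvfs.h>

/* Standalone sketch (not the AFR code itself): reduce several
 * per-brick statvfs replies by keeping the one with the smallest
 * f_bavail, mirroring the rule used in afr_statfs_cbk. */
static struct statvfs
aggregate_statfs(const struct statvfs *replies, int count)
{
        struct statvfs agg = replies[0];
        int i;

        for (i = 1; i < count; i++) {
                if (replies[i].f_bavail < agg.f_bavail)
                        agg = replies[i];
        }
        return agg;
}

int
main(void)
{
        struct statvfs replies[2] = {
                {.f_bsize = 4096, .f_bavail = 1000},
                {.f_bsize = 4096, .f_bavail = 250},
        };
        struct statvfs agg = aggregate_statfs(replies, 2);

        /* Reports 250: the fullest brick wins. */
        printf("reported f_bavail = %lu\n", (unsigned long)agg.f_bavail);
        return 0;
}
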
int
-afr_common_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- afr_local_t *local = NULL;
- int child_index = (long)cookie;
+afr_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_count = 0;
+ int32_t op_errno = ENOMEM;
+
+ priv = this->private;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_STATFS;
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
+
+ if (priv->arbiter_count == 1 && local->child_up[ARBITER_BRICK_INDEX])
+ local->call_count--;
+ call_count = local->call_count;
+ if (!call_count) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ if (AFR_IS_ARBITER_BRICK(priv, i))
+ continue;
+ STACK_WIND(frame, afr_statfs_cbk, priv->children[i],
+ priv->children[i]->fops->statfs, loc, xdata);
+ if (!--call_count)
+ break;
+ }
+ }
- local = frame->local;
+ return 0;
+out:
+ AFR_STACK_UNWIND(statfs, frame, -1, op_errno, NULL, NULL);
- local->replies[child_index].valid = 1;
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
- if (op_ret == 0 && xdata) {
- local->replies[child_index].xdata = dict_ref (xdata);
- LOCK (&frame->lock);
- {
- if (!local->xdata_rsp)
- local->xdata_rsp = dict_ref (xdata);
- }
- UNLOCK (&frame->lock);
- }
- return 0;
+ return 0;
}
-static int32_t
-afr_parallel_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-
+int32_t
+afr_lk_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
{
- int call_count = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = this->private;
+ int call_count = -1;
+ int child_index = (long)cookie;
- afr_common_inodelk_cbk (frame, cookie, this, op_ret, op_errno, xdata);
+ local = frame->local;
- call_count = afr_frame_return (frame);
- if (call_count == 0)
- afr_inodelk_done (frame, this);
+ if (op_ret < 0 && op_errno != ENOTCONN && op_errno != EBADFD) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_UNLOCK_FAIL,
+ "gfid=%s: unlock failed on subvolume %s "
+ "with lock owner %s",
+ uuid_utoa(local->fd->inode->gfid),
+ priv->children[child_index]->name,
+ lkowner_utoa(&frame->root->lk_owner));
+ }
- return 0;
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno, NULL,
+ local->xdata_rsp);
+ }
+
+ return 0;
}
-static inline gf_boolean_t
-afr_is_conflicting_lock_present (int32_t op_ret, int32_t op_errno)
+int32_t
+afr_lk_unlock(call_frame_t *frame, xlator_t *this)
{
- if (op_ret == -1 && op_errno == EAGAIN)
- return _gf_true;
- return _gf_false;
-}
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_count = 0;
-static int32_t
-afr_serialized_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+ local = frame->local;
+ priv = this->private;
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int child_index = (long)cookie;
- int next_child = 0;
+ call_count = afr_locked_nodes_count(local->cont.lk.locked_nodes,
+ priv->child_count);
- local = frame->local;
- priv = this->private;
+ if (call_count == 0) {
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno, NULL,
+ local->xdata_rsp);
+ return 0;
+ }
- afr_common_inodelk_cbk (frame, cookie, this, op_ret, op_errno, xdata);
+ local->call_count = call_count;
- for (next_child = child_index + 1; next_child < priv->child_count;
- next_child++) {
- if (local->child_up[next_child])
- break;
- }
+ local->cont.lk.user_flock.l_type = F_UNLCK;
- if (afr_is_conflicting_lock_present (op_ret, op_errno) ||
- (next_child == priv->child_count)) {
- afr_inodelk_done (frame, this);
- } else {
- STACK_WIND_COOKIE (frame, afr_serialized_inodelk_cbk,
- (void *) (long) next_child,
- priv->children[next_child],
- priv->children[next_child]->fops->inodelk,
- (const char *)local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock,
- local->xdata_req);
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->cont.lk.locked_nodes[i]) {
+ STACK_WIND_COOKIE(frame, afr_lk_unlock_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->lk,
+ local->fd, F_SETLK, &local->cont.lk.user_flock,
+ NULL);
+
+ if (!--call_count)
+ break;
}
+ }
- return 0;
+ return 0;
}
-static int
-afr_parallel_inodelk_wind (call_frame_t *frame, xlator_t *this)
+int32_t
+afr_lk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int call_count = 0;
- int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int child_index = -1;
- priv = this->private;
- local = frame->local;
- call_count = local->call_count;
+ local = frame->local;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (!local->child_up[i])
- continue;
- STACK_WIND_COOKIE (frame, afr_parallel_inodelk_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- (const char *)local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock,
- local->xdata_req);
- if (!--call_count)
- break;
- }
+ child_index = (long)cookie;
+
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
+ if (op_ret < 0 && op_errno == EAGAIN) {
+ local->op_ret = -1;
+ local->op_errno = EAGAIN;
+
+ afr_lk_unlock(frame, this);
return 0;
-}
+ }
+
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ local->op_errno = 0;
+ local->cont.lk.locked_nodes[child_index] = 1;
+ local->cont.lk.ret_flock = *lock;
+ }
+
+ child_index++;
+
+ if (child_index < priv->child_count) {
+ STACK_WIND_COOKIE(frame, afr_lk_cbk, (void *)(long)child_index,
+ priv->children[child_index],
+ priv->children[child_index]->fops->lk, local->fd,
+ local->cont.lk.cmd, &local->cont.lk.user_flock,
+ local->xdata_req);
+ } else if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lk.locked_nodes, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
-static int
-afr_serialized_inodelk_wind (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
+ afr_lk_unlock(frame, this);
+ } else {
+ if (local->op_ret < 0)
+ local->op_errno = afr_final_errno(local, priv);
- priv = this->private;
- local = frame->local;
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno,
+ &local->cont.lk.ret_flock, local->xdata_rsp);
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_serialized_inodelk_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- (const char *)local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock,
- local->xdata_req);
- break;
- }
- }
- return 0;
+ return 0;
}
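
afr_lk_cbk winds the lock request to one child at a time and, as soon as any child answers EAGAIN, rolls the partial locks back via afr_lk_unlock() before unwinding. A toy standalone simulation of that serialize-then-rollback pattern (the busy/locked arrays and the try_lock helper are hypothetical stand-ins for the real fops) could be:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NBRICKS 3

/* Hypothetical per-brick lock table: 1 means someone else already
 * holds the lock, so our non-blocking request gets EAGAIN. */
static int busy[NBRICKS] = {0, 1, 0};
static int locked[NBRICKS];

static int
try_lock(int brick)
{
        if (busy[brick])
                return -EAGAIN;
        locked[brick] = 1;
        return 0;
}

static void
unlock_all(void)
{
        int i;
        for (i = 0; i < NBRICKS; i++)
                locked[i] = 0;
}

int
main(void)
{
        int i, ret;

        for (i = 0; i < NBRICKS; i++) {
                ret = try_lock(i);
                if (ret == -EAGAIN) {
                        /* Conflicting lock somewhere: release the partial
                         * locks and report EAGAIN to the application, as
                         * afr_lk_cbk does through afr_lk_unlock(). */
                        unlock_all();
                        printf("lk failed: %s\n", strerror(EAGAIN));
                        return 1;
                }
        }
        printf("lk succeeded on all %d bricks\n", NBRICKS);
        return 0;
}
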
-int32_t
-afr_inodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, int32_t cmd,
- struct gf_flock *flock, dict_t *xdata)
+int
+afr_lk_transaction_cbk(int ret, call_frame_t *frame, void *opaque)
{
- afr_local_t *local = NULL;
- int32_t op_errno = ENOMEM;
+ return 0;
+}
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+int
+afr_lk_txn_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int child_index = -1;
+
+ local = frame->local;
+ child_index = (long)cookie;
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ local->op_errno = 0;
+ local->cont.lk.locked_nodes[child_index] = 1;
+ local->cont.lk.ret_flock = *lock;
+ }
+ syncbarrier_wake(&local->barrier);
+ return 0;
+}
- loc_copy (&local->loc, loc);
- local->cont.inodelk.volume = gf_strdup (volume);
- if (!local->cont.inodelk.volume) {
- op_errno = ENOMEM;
- goto out;
+int
+afr_lk_txn_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
+{
+ afr_local_t *local = frame->local;
+ afr_private_t *priv = this->private;
+ int child_index = (long)cookie;
+
+ if (op_ret < 0 && op_errno != ENOTCONN && op_errno != EBADFD) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_UNLOCK_FAIL,
+ "gfid=%s: unlock failed on subvolume %s "
+ "with lock owner %s",
+ uuid_utoa(local->fd->inode->gfid),
+ priv->children[child_index]->name,
+ lkowner_utoa(&frame->root->lk_owner));
+ }
+ return 0;
+}
+int
+afr_lk_transaction(void *opaque)
+{
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ char *wind_on = NULL;
+ int op_errno = 0;
+ int i = 0;
+ int ret = 0;
+
+ frame = (call_frame_t *)opaque;
+ local = frame->local;
+ this = frame->this;
+ priv = this->private;
+ wind_on = alloca0(priv->child_count);
+
+ if (priv->arbiter_count || priv->child_count != 3) {
+ op_errno = ENOTSUP;
+ gf_msg(frame->this->name, GF_LOG_ERROR, op_errno, AFR_MSG_LK_HEAL_DOM,
+ "%s: Lock healing supported only for replica 3 volumes.",
+ uuid_utoa(local->fd->inode->gfid));
+ goto err;
+ }
+
+ op_errno = -afr_dom_lock_acquire(frame); // Released during
+ // AFR_STACK_UNWIND
+ if (op_errno != 0) {
+ goto err;
+ }
+ if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lk.dom_locked_nodes, this, NULL)) {
+ op_errno = afr_final_errno(local, priv);
+ goto err;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->child_up[i] && local->cont.lk.dom_locked_nodes[i])
+ wind_on[i] = 1;
+ }
+ AFR_ONLIST(wind_on, frame, afr_lk_txn_wind_cbk, lk, local->fd,
+ local->cont.lk.cmd, &local->cont.lk.user_flock,
+ local->xdata_req);
+
+ if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lk.locked_nodes, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
+ goto unlock;
+ } else {
+ if (local->cont.lk.user_flock.l_type == F_UNLCK)
+ ret = afr_remove_lock_from_saved_locks(local, this);
+ else
+ ret = afr_add_lock_to_saved_locks(frame, this);
+ if (ret) {
+ local->op_ret = -1;
+ local->op_errno = -ret;
+ goto unlock;
}
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno,
+ &local->cont.lk.ret_flock, local->xdata_rsp);
+ }
- local->cont.inodelk.cmd = cmd;
- local->cont.inodelk.flock = *flock;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
-
- /* At least one child is up */
- /*
- * Non-blocking locks also need to be serialized. Otherwise there is
- * a chance that both the mounts which issued same non-blocking inodelk
- * may endup not acquiring the lock on any-brick.
- * Ex: Mount1 and Mount2
- * request for full length lock on file f1. Mount1 afr may acquire the
- * partial lock on brick-1 and may not acquire the lock on brick-2
- * because Mount2 already got the lock on brick-2, vice versa. Since
- * both the mounts only got partial locks, afr treats them as failure in
- * gaining the locks and unwinds with EAGAIN errno.
- */
- if (flock->l_type == F_UNLCK) {
- afr_parallel_inodelk_wind (frame, this);
- } else {
- afr_serialized_inodelk_wind (frame, this);
+ return 0;
+
+unlock:
+ local->cont.lk.user_flock.l_type = F_UNLCK;
+ AFR_ONLIST(local->cont.lk.locked_nodes, frame, afr_lk_txn_unlock_cbk, lk,
+ local->fd, F_SETLK, &local->cont.lk.user_flock, NULL);
+err:
+ AFR_STACK_UNWIND(lk, frame, -1, op_errno, NULL, NULL);
+ return -1;
+}
+
+int
+afr_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ struct gf_flock *flock, dict_t *xdata)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int ret = 0;
+ int i = 0;
+ int32_t op_errno = ENOMEM;
+
+ priv = this->private;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_LK;
+ if (!afr_lk_is_unlock(cmd, flock)) {
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
+ }
+
+ local->cont.lk.locked_nodes = GF_CALLOC(
+ priv->child_count, sizeof(*local->cont.lk.locked_nodes),
+ gf_afr_mt_char);
+
+ if (!local->cont.lk.locked_nodes) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->fd = fd_ref(fd);
+ local->cont.lk.cmd = cmd;
+ local->cont.lk.user_flock = *flock;
+ local->cont.lk.ret_flock = *flock;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+
+ if (afr_is_lock_mode_mandatory(xdata)) {
+ ret = synctask_new(this->ctx->env, afr_lk_transaction,
+ afr_lk_transaction_cbk, frame, frame);
+ if (ret) {
+ op_errno = ENOMEM;
+ goto out;
}
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(frame, afr_lk_cbk, (void *)(long)0, priv->children[i],
+ priv->children[i]->fops->lk, fd, cmd, flock,
+ local->xdata_req);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (inodelk, frame, -1, op_errno, NULL);
+ AFR_STACK_UNWIND(lk, frame, -1, op_errno, NULL, NULL);
- return 0;
+ return 0;
}
-
int32_t
-afr_finodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-
+afr_lease_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_lease *lease,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_count = -1;
-
- local = frame->local;
-
- LOCK (&frame->lock);
- {
- if (op_ret == 0)
- local->op_ret = 0;
-
- local->op_errno = op_errno;
- }
- UNLOCK (&frame->lock);
+ afr_local_t *local = NULL;
+ int call_count = -1;
- call_count = afr_frame_return (frame);
+ local = frame->local;
+ call_count = afr_frame_return(frame);
- if (call_count == 0)
- AFR_STACK_UNWIND (finodelk, frame, local->op_ret,
- local->op_errno, xdata);
+ if (call_count == 0)
+ AFR_STACK_UNWIND(lease, frame, local->op_ret, local->op_errno, lease,
+ xdata);
- return 0;
+ return 0;
}
-
int32_t
-afr_finodelk (call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
- int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+afr_lease_unlock(call_frame_t *frame, xlator_t *this)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_count = 0;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ call_count = afr_locked_nodes_count(local->cont.lease.locked_nodes,
+ priv->child_count);
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ if (call_count == 0) {
+ AFR_STACK_UNWIND(lease, frame, local->op_ret, local->op_errno,
+ &local->cont.lease.ret_lease, NULL);
+ return 0;
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_finodelk_cbk,
- priv->children[i],
- priv->children[i]->fops->finodelk,
- volume, fd, cmd, flock, xdata);
-
- if (!--call_count)
- break;
- }
- }
+ local->call_count = call_count;
- return 0;
-out:
- AFR_STACK_UNWIND (finodelk, frame, -1, op_errno, NULL);
+ local->cont.lease.user_lease.cmd = GF_UNLK_LEASE;
- return 0;
-}
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->cont.lease.locked_nodes[i]) {
+ STACK_WIND(frame, afr_lease_unlock_cbk, priv->children[i],
+ priv->children[i]->fops->lease, &local->loc,
+ &local->cont.lease.user_lease, NULL);
+ if (!--call_count)
+ break;
+ }
+ }
+
+ return 0;
+}
int32_t
-afr_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_lease_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct gf_lease *lease, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_count = -1;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int child_index = -1;
- local = frame->local;
+ local = frame->local;
+ priv = this->private;
- LOCK (&frame->lock);
- {
- if (op_ret == 0)
- local->op_ret = 0;
+ child_index = (long)cookie;
- local->op_errno = op_errno;
- }
- UNLOCK (&frame->lock);
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
+ if (op_ret < 0 && op_errno == EAGAIN) {
+ local->op_ret = -1;
+ local->op_errno = EAGAIN;
- call_count = afr_frame_return (frame);
+ afr_lease_unlock(frame, this);
+ return 0;
+ }
+
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ local->op_errno = 0;
+ local->cont.lease.locked_nodes[child_index] = 1;
+ local->cont.lease.ret_lease = *lease;
+ }
+
+ child_index++;
+ if (child_index < priv->child_count) {
+ STACK_WIND_COOKIE(frame, afr_lease_cbk, (void *)(long)child_index,
+ priv->children[child_index],
+ priv->children[child_index]->fops->lease, &local->loc,
+ &local->cont.lease.user_lease, xdata);
+ } else if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lease.locked_nodes, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
- if (call_count == 0)
- AFR_STACK_UNWIND (entrylk, frame, local->op_ret,
- local->op_errno, xdata);
+ afr_lease_unlock(frame, this);
+ } else {
+ if (local->op_ret < 0)
+ local->op_errno = afr_final_errno(local, priv);
+ AFR_STACK_UNWIND(lease, frame, local->op_ret, local->op_errno,
+ &local->cont.lease.ret_lease, NULL);
+ }
- return 0;
+ return 0;
}
-
int
-afr_entrylk (call_frame_t *frame, xlator_t *this, const char *volume,
- loc_t *loc, const char *basename, entrylk_cmd cmd,
- entrylk_type type, dict_t *xdata)
+afr_lease(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct gf_lease *lease, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int32_t op_errno = ENOMEM;
- priv = this->private;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ local->op = GF_FOP_LEASE;
+ local->cont.lease.locked_nodes = GF_CALLOC(
+ priv->child_count, sizeof(*local->cont.lease.locked_nodes),
+ gf_afr_mt_char);
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_entrylk_cbk,
- priv->children[i],
- priv->children[i]->fops->entrylk,
- volume, loc, basename, cmd, type, xdata);
-
- if (!--call_count)
- break;
- }
- }
+ if (!local->cont.lease.locked_nodes) {
+ op_errno = ENOMEM;
+ goto out;
+ }
- return 0;
-out:
- AFR_STACK_UNWIND (entrylk, frame, -1, op_errno, NULL);
+ loc_copy(&local->loc, loc);
+ local->cont.lease.user_lease = *lease;
+ local->cont.lease.ret_lease = *lease;
- return 0;
-}
+ STACK_WIND_COOKIE(frame, afr_lease_cbk, (void *)(long)0, priv->children[0],
+ priv->children[0]->fops->lease, loc, lease, xdata);
+ return 0;
+out:
+ AFR_STACK_UNWIND(lease, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
int
-afr_fentrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-
-{
- afr_local_t *local = NULL;
- int call_count = -1;
-
- local = frame->local;
-
- LOCK (&frame->lock);
- {
- if (op_ret == 0)
- local->op_ret = 0;
-
- local->op_errno = op_errno;
- }
- UNLOCK (&frame->lock);
-
- call_count = afr_frame_return (frame);
+afr_ipc_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int child_index = (long)cookie;
+ int call_count = 0;
+ gf_boolean_t failed = _gf_false;
+ gf_boolean_t succeeded = _gf_false;
+ int i = 0;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (xdata)
+ local->replies[child_index].xdata = dict_ref(xdata);
+
+ call_count = afr_frame_return(frame);
+ if (call_count)
+ goto out;
+ /* If any of the subvolumes failed with an errno other than
+ * ENOTCONN, return that error; otherwise return success,
+ * unless all the subvolumes failed.
+ * TODO: In case of failure, we need to unregister the xattrs
+ * from the other subvolumes where it succeeded (once upcall
+ * fixes the Bz-1371622)*/
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret < 0 &&
+ local->replies[i].op_errno != ENOTCONN) {
+ local->op_ret = local->replies[i].op_ret;
+ local->op_errno = local->replies[i].op_errno;
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = NULL;
+ if (local->replies[i].xdata) {
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ }
+ failed = _gf_true;
+ break;
+ }
+ if (local->replies[i].op_ret == 0) {
+ succeeded = _gf_true;
+ local->op_ret = 0;
+ local->op_errno = 0;
+ if (!local->xdata_rsp && local->replies[i].xdata) {
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ }
+ }
+ }
+
+ if (!succeeded && !failed) {
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ }
- if (call_count == 0)
- AFR_STACK_UNWIND (fentrylk, frame, local->op_ret,
- local->op_errno, xdata);
+ AFR_STACK_UNWIND(ipc, frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
- return 0;
+out:
+ return 0;
}
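
The comment in afr_ipc_cbk describes the reduction rule for the collected replies: any failure other than ENOTCONN wins, otherwise a single success is enough, and if nothing succeeded at all the result is ENOTCONN. A minimal standalone version of that reducer, assuming a simplified reply struct in place of afr_local_t, might be:

#include <errno.h>
#include <stdio.h>

struct reply {
        int valid;
        int op_ret;
        int op_errno;
};

/* Sketch of the reply-reduction rule used by afr_ipc_cbk. */
static int
reduce_replies(const struct reply *r, int count, int *op_errno)
{
        int i, succeeded = 0;

        for (i = 0; i < count; i++) {
                if (!r[i].valid)
                        continue;
                if (r[i].op_ret < 0 && r[i].op_errno != ENOTCONN) {
                        /* A real failure on any subvolume wins. */
                        *op_errno = r[i].op_errno;
                        return -1;
                }
                if (r[i].op_ret == 0)
                        succeeded = 1;
        }
        if (!succeeded) {
                /* Nothing succeeded and nothing failed "hard". */
                *op_errno = ENOTCONN;
                return -1;
        }
        *op_errno = 0;
        return 0;
}

int
main(void)
{
        struct reply replies[3] = {
                {1, 0, 0}, {1, -1, ENOTCONN}, {0, 0, 0},
        };
        int op_errno = 0;
        int ret = reduce_replies(replies, 3, &op_errno);

        printf("ret=%d op_errno=%d\n", ret, op_errno); /* ret=0 */
        return 0;
}
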
-
int
-afr_fentrylk (call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
- const char *basename, entrylk_cmd cmd, entrylk_type type,
- dict_t *xdata)
+afr_ipc(call_frame_t *frame, xlator_t *this, int32_t op, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ int32_t op_errno = -1;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_cnt = -1;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
- priv = this->private;
+ if (op != GF_IPC_TARGET_UPCALL)
+ goto wind_default;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ VALIDATE_OR_GOTO(this->private, err);
+ priv = this->private;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto err;
+ call_cnt = local->call_count;
+
+ if (xdata) {
for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_fentrylk_cbk,
- priv->children[i],
- priv->children[i]->fops->fentrylk,
- volume, fd, basename, cmd, type, xdata);
-
- if (!--call_count)
- break;
- }
+ if (dict_set_int8(xdata, priv->pending_key[i], 0) < 0)
+ goto err;
}
+ }
- return 0;
-out:
- AFR_STACK_UNWIND (fentrylk, frame, -1, op_errno, NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->child_up[i])
+ continue;
- return 0;
-}
+ STACK_WIND_COOKIE(frame, afr_ipc_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->ipc, op,
+ xdata);
+ if (!--call_cnt)
+ break;
+ }
+ return 0;
+err:
+ if (op_errno == -1)
+ op_errno = errno;
+ AFR_STACK_UNWIND(ipc, frame, -1, op_errno, NULL);
+
+ return 0;
+
+wind_default:
+ STACK_WIND(frame, default_ipc_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ipc, op, xdata);
+ return 0;
+}
int
-afr_statfs_cbk (call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
- int op_errno, struct statvfs *statvfs, dict_t *xdata)
+afr_forget(xlator_t *this, inode_t *inode)
{
- afr_local_t *local = NULL;
- int call_count = 0;
- struct statvfs *buf = NULL;
+ uint64_t ctx_int = 0;
+ afr_inode_ctx_t *ctx = NULL;
- LOCK (&frame->lock);
- {
- local = frame->local;
-
- if (op_ret != 0) {
- local->op_errno = op_errno;
- goto unlock;
- }
-
- local->op_ret = op_ret;
-
- buf = &local->cont.statfs.buf;
- if (local->cont.statfs.buf_set) {
- if (statvfs->f_bavail < buf->f_bavail) {
- *buf = *statvfs;
- if (xdata) {
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
- local->xdata_rsp = dict_ref (xdata);
- }
- }
- } else {
- *buf = *statvfs;
- local->cont.statfs.buf_set = 1;
- if (xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
- }
-unlock:
- UNLOCK (&frame->lock);
-
- call_count = afr_frame_return (frame);
+ afr_spb_choice_timeout_cancel(this, inode);
+ inode_ctx_del(inode, this, &ctx_int);
+ if (!ctx_int)
+ return 0;
- if (call_count == 0)
- AFR_STACK_UNWIND (statfs, frame, local->op_ret, local->op_errno,
- &local->cont.statfs.buf, local->xdata_rsp);
+ ctx = (afr_inode_ctx_t *)(uintptr_t)ctx_int;
+ afr_inode_ctx_destroy(ctx);
+ return 0;
+}
- return 0;
+int
+afr_priv_dump(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ char key_prefix[GF_DUMP_MAX_BUF_LEN];
+ char key[GF_DUMP_MAX_BUF_LEN];
+ int i = 0;
+
+ GF_ASSERT(this);
+ priv = this->private;
+
+ GF_ASSERT(priv);
+ snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name);
+ gf_proc_dump_add_section("%s", key_prefix);
+ gf_proc_dump_write("child_count", "%u", priv->child_count);
+ for (i = 0; i < priv->child_count; i++) {
+ sprintf(key, "child_up[%d]", i);
+ gf_proc_dump_write(key, "%d", priv->child_up[i]);
+ sprintf(key, "pending_key[%d]", i);
+ gf_proc_dump_write(key, "%s", priv->pending_key[i]);
+ sprintf(key, "pending_reads[%d]", i);
+ gf_proc_dump_write(key, "%" PRId64,
+ GF_ATOMIC_GET(priv->pending_reads[i]));
+ sprintf(key, "child_latency[%d]", i);
+ gf_proc_dump_write(key, "%" PRId64, priv->child_latency[i]);
+ sprintf(key, "halo_child_up[%d]", i);
+ gf_proc_dump_write(key, "%d", priv->halo_child_up[i]);
+ }
+ gf_proc_dump_write("data_self_heal", "%d", priv->data_self_heal);
+ gf_proc_dump_write("metadata_self_heal", "%d", priv->metadata_self_heal);
+ gf_proc_dump_write("entry_self_heal", "%d", priv->entry_self_heal);
+ gf_proc_dump_write("read_child", "%d", priv->read_child);
+ gf_proc_dump_write("wait_count", "%u", priv->wait_count);
+ gf_proc_dump_write("heal-wait-queue-length", "%d", priv->heal_wait_qlen);
+ gf_proc_dump_write("heal-waiters", "%d", priv->heal_waiters);
+ gf_proc_dump_write("background-self-heal-count", "%d",
+ priv->background_self_heal_count);
+ gf_proc_dump_write("healers", "%d", priv->healers);
+ gf_proc_dump_write("read-hash-mode", "%d", priv->hash_mode);
+ gf_proc_dump_write("use-anonymous-inode", "%d", priv->use_anon_inode);
+ if (priv->quorum_count == AFR_QUORUM_AUTO) {
+ gf_proc_dump_write("quorum-type", "auto");
+ } else if (priv->quorum_count == 0) {
+ gf_proc_dump_write("quorum-type", "none");
+ } else {
+ gf_proc_dump_write("quorum-type", "fixed");
+ gf_proc_dump_write("quorum-count", "%d", priv->quorum_count);
+ }
+ gf_proc_dump_write("up", "%u", afr_has_quorum(priv->child_up, this, NULL));
+ if (priv->thin_arbiter_count) {
+ gf_proc_dump_write("ta_child_up", "%d", priv->ta_child_up);
+ gf_proc_dump_write("ta_bad_child_index", "%d",
+ priv->ta_bad_child_index);
+ gf_proc_dump_write("ta_notify_dom_lock_offset", "%" PRId64,
+ priv->ta_notify_dom_lock_offset);
+ }
+
+ return 0;
}
+/**
+ * find_child_index - find the child's index in the array of subvolumes
+ * @this: AFR
+ * @child: child
+ */
-int
-afr_statfs (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+static int
+afr_find_child_index(xlator_t *this, xlator_t *child)
{
- afr_local_t * local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- int call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ int child_count = -1;
+ int i = -1;
- priv = this->private;
+ priv = this->private;
+ child_count = priv->child_count;
+ if (priv->thin_arbiter_count) {
+ child_count++;
+ }
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ for (i = 0; i < child_count; i++) {
+ if ((xlator_t *)child == priv->children[i])
+ break;
+ }
- if (priv->arbiter_count == 1 && local->child_up[ARBITER_BRICK_INDEX])
- local->call_count--;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ return i;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- if (AFR_IS_ARBITER_BRICK(priv, i))
- continue;
- STACK_WIND (frame, afr_statfs_cbk,
- priv->children[i],
- priv->children[i]->fops->statfs,
- loc, xdata);
- if (!--call_count)
- break;
- }
- }
+int
+__afr_get_up_children_count(afr_private_t *priv)
+{
+ int up_children = 0;
+ int i = 0;
- return 0;
-out:
- AFR_STACK_UNWIND (statfs, frame, -1, op_errno, NULL, NULL);
+ for (i = 0; i < priv->child_count; i++)
+ if (priv->child_up[i] == 1)
+ up_children++;
- return 0;
+ return up_children;
}
-
-int32_t
-afr_lk_unlock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
- dict_t *xdata)
+static int
+__get_heard_from_all_status(xlator_t *this)
{
- afr_local_t * local = NULL;
- int call_count = -1;
-
- local = frame->local;
- call_count = afr_frame_return (frame);
-
- if (call_count == 0)
- AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
- lock, xdata);
+ afr_private_t *priv = this->private;
+ int i;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!priv->last_event[i]) {
+ return 0;
+ }
+ }
+ if (priv->thin_arbiter_count && !priv->ta_child_up) {
return 0;
+ }
+ return 1;
}
-
-int32_t
-afr_lk_unlock (call_frame_t *frame, xlator_t *this)
+glusterfs_event_t
+__afr_transform_event_from_state(xlator_t *this)
{
- afr_local_t * local = NULL;
- afr_private_t * priv = NULL;
- int i = 0;
- int call_count = 0;
-
- local = frame->local;
- priv = this->private;
-
- call_count = afr_locked_nodes_count (local->cont.lk.locked_nodes,
- priv->child_count);
+ int i = 0;
+ int up_children = 0;
+ afr_private_t *priv = this->private;
- if (call_count == 0) {
- AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
- &local->cont.lk.ret_flock, NULL);
- return 0;
- }
-
- local->call_count = call_count;
+ if (__get_heard_from_all_status(this))
+ /* have_heard_from_all. Let afr_notify() do the propagation. */
+ return GF_EVENT_MAXVAL;
- local->cont.lk.user_flock.l_type = F_UNLCK;
+ up_children = __afr_get_up_children_count(priv);
+ /* Treat children with a pending notification as having sent a
+ * GF_EVENT_CHILD_DOWN, i.e. set the event to GF_EVENT_SOME_DESCENDENT_DOWN,
+ * as done in afr_notify() */
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->last_event[i])
+ continue;
+ priv->last_event[i] = GF_EVENT_SOME_DESCENDENT_DOWN;
+ priv->child_up[i] = 0;
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (local->cont.lk.locked_nodes[i]) {
- STACK_WIND (frame, afr_lk_unlock_cbk,
- priv->children[i],
- priv->children[i]->fops->lk,
- local->fd, F_SETLK,
- &local->cont.lk.user_flock, NULL);
-
- if (!--call_count)
- break;
- }
- }
+ if (up_children)
+ /* We received at least one child up */
+ return GF_EVENT_CHILD_UP;
+ else
+ return GF_EVENT_CHILD_DOWN;
- return 0;
+ return GF_EVENT_MAXVAL;
}
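
__afr_transform_event_from_state turns the aggregate child state into a single event when the delayed-notify timer fires: children that never reported are treated as down, and the result is CHILD_UP if at least one child is up, CHILD_DOWN otherwise. A toy version of the same decision, with plain ints standing in for the afr_private_t fields, could look like:

#include <stdio.h>

enum event { EV_NONE, EV_CHILD_UP, EV_CHILD_DOWN };

/* Toy state-to-event transformation: unheard children are marked
 * down, and the timer expiry becomes CHILD_UP if anything is up,
 * CHILD_DOWN otherwise. EV_NONE means "nothing to propagate". */
static enum event
transform_event(int *last_event, int *child_up, int count)
{
        int i, up = 0, heard_from_all = 1;

        for (i = 0; i < count; i++) {
                if (!last_event[i])
                        heard_from_all = 0;
                if (child_up[i] == 1)
                        up++;
        }
        if (heard_from_all)
                return EV_NONE; /* afr_notify() already propagated */

        for (i = 0; i < count; i++) {
                if (!last_event[i]) {
                        last_event[i] = EV_CHILD_DOWN;
                        child_up[i] = 0;
                }
        }
        return up ? EV_CHILD_UP : EV_CHILD_DOWN;
}

int
main(void)
{
        int last_event[3] = {EV_CHILD_UP, 0, 0};
        int child_up[3] = {1, -1, -1};

        /* One child up, two never heard from: prints 1 (EV_CHILD_UP). */
        printf("event = %d\n", transform_event(last_event, child_up, 3));
        return 0;
}
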
+static void
+afr_notify_cbk(void *data)
+{
+ xlator_t *this = data;
+ afr_private_t *priv = this->private;
+ glusterfs_event_t event = GF_EVENT_MAXVAL;
+ gf_boolean_t propagate = _gf_false;
+
+ LOCK(&priv->lock);
+ {
+ if (!priv->timer) {
+ /*
+ * Either child_up/child_down is already sent to parent.
+ * This is a spurious wake up.
+ */
+ goto unlock;
+ }
+ priv->timer = NULL;
+ event = __afr_transform_event_from_state(this);
+ if (event != GF_EVENT_MAXVAL)
+ propagate = _gf_true;
+ }
+unlock:
+ UNLOCK(&priv->lock);
+ if (propagate)
+ default_notify(this, event, NULL);
+}
-int32_t
-afr_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
+static void
+__afr_launch_notify_timer(xlator_t *this, afr_private_t *priv)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int child_index = -1;
-/* int ret = 0; */
-
+ struct timespec delay = {
+ 0,
+ };
- local = frame->local;
- priv = this->private;
+ gf_msg_debug(this->name, 0, "Initiating child-down timer");
+ delay.tv_sec = 10;
+ delay.tv_nsec = 0;
+ priv->timer = gf_timer_call_after(this->ctx, delay, afr_notify_cbk, this);
+ if (priv->timer == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_TIMER_CREATE_FAIL,
+ "Cannot create timer for delayed initialization");
+ }
+}
- child_index = (long) cookie;
+static int
+find_best_down_child(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ int i = -1;
+ int32_t best_child = -1;
+ int64_t best_latency = INT64_MAX;
- if (!child_went_down (op_ret, op_errno) && (op_ret == -1)) {
- local->op_ret = -1;
- local->op_errno = op_errno;
+ priv = this->private;
- afr_lk_unlock (frame, this);
- return 0;
- }
-
- if (op_ret == 0) {
- local->op_ret = 0;
- local->op_errno = 0;
- local->cont.lk.locked_nodes[child_index] = 1;
- local->cont.lk.ret_flock = *lock;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!priv->child_up[i] && priv->child_latency[i] >= 0 &&
+ priv->child_latency[i] < best_latency) {
+ best_child = i;
+ best_latency = priv->child_latency[i];
}
+ }
+ if (best_child >= 0) {
+ gf_msg_debug(this->name, 0,
+ "Found best down child (%d) @ %" PRId64 " ms latency",
+ best_child, best_latency);
+ }
+ return best_child;
+}
- child_index++;
+int
+find_worst_up_child(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ int i = -1;
+ int32_t worst_child = -1;
+ int64_t worst_latency = INT64_MIN;
- if (child_index < priv->child_count) {
- STACK_WIND_COOKIE (frame, afr_lk_cbk, (void *) (long) child_index,
- priv->children[child_index],
- priv->children[child_index]->fops->lk,
- local->fd, local->cont.lk.cmd,
- &local->cont.lk.user_flock, xdata);
- } else if (local->op_ret == -1) {
- /* all nodes have gone down */
+ priv = this->private;
- AFR_STACK_UNWIND (lk, frame, -1, ENOTCONN,
- &local->cont.lk.ret_flock, NULL);
- } else {
- AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
- &local->cont.lk.ret_flock, NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->child_up[i] && priv->child_latency[i] >= 0 &&
+ priv->child_latency[i] > worst_latency) {
+ worst_child = i;
+ worst_latency = priv->child_latency[i];
}
-
- return 0;
+ }
+ if (worst_child >= 0) {
+ gf_msg_debug(this->name, 0,
+ "Found worst up child (%d) @ %" PRId64 " ms latency",
+ worst_child, worst_latency);
+ }
+ return worst_child;
}
-
-int
-afr_lk (call_frame_t *frame, xlator_t *this,
- fd_t *fd, int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+void
+__afr_handle_ping_event(xlator_t *this, xlator_t *child_xlator, const int idx,
+ int64_t halo_max_latency_msec, int32_t *event,
+ int64_t child_latency_msec)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ int up_children = 0;
- priv = this->private;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ priv->child_latency[idx] = child_latency_msec;
+ gf_msg_debug(child_xlator->name, 0, "Client ping @ %" PRId64 " ms",
+ child_latency_msec);
+ if (priv->shd.iamshd)
+ return;
- local->cont.lk.locked_nodes = GF_CALLOC (priv->child_count,
- sizeof (*local->cont.lk.locked_nodes),
- gf_afr_mt_char);
+ up_children = __afr_get_up_children_count(priv);
- if (!local->cont.lk.locked_nodes) {
- op_errno = ENOMEM;
- goto out;
+ if (child_latency_msec > halo_max_latency_msec &&
+ priv->child_up[idx] == 1 && up_children > priv->halo_min_replicas) {
+ if ((up_children - 1) < priv->halo_min_replicas) {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Overriding halo threshold, "
+ "min replicas: %d",
+ priv->halo_min_replicas);
+ } else {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Child latency (%" PRId64
+ " ms) "
+ "exceeds halo threshold (%" PRId64
+ "), "
+ "marking child down.",
+ child_latency_msec, halo_max_latency_msec);
+ if (priv->halo_child_up[idx]) {
+ *event = GF_EVENT_CHILD_DOWN;
+ }
+ }
+ } else if (child_latency_msec < halo_max_latency_msec &&
+ priv->child_up[idx] == 0) {
+ if (up_children < priv->halo_max_replicas) {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Child latency (%" PRId64
+ " ms) "
+ "below halo threshold (%" PRId64
+ "), "
+ "marking child up.",
+ child_latency_msec, halo_max_latency_msec);
+ if (priv->halo_child_up[idx]) {
+ *event = GF_EVENT_CHILD_UP;
+ }
+ } else {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Not marking child %d up, "
+ "max replicas (%d) reached.",
+ idx, priv->halo_max_replicas);
}
+ }
+}
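
A simplified sketch of the up/down decision the ping handler makes: latency above the halo threshold marks an up child down as long as enough replicas stay up, and latency back under the threshold marks a down child up while the replica ceiling is not exceeded. The helper name and the numbers in main() are illustrative only, and the halo_child_up bookkeeping of the real code is omitted:

#include <stdio.h>

enum event { EV_NONE, EV_CHILD_UP, EV_CHILD_DOWN };

/* halo_min/halo_max correspond to halo_min_replicas and
 * halo_max_replicas; all values here are made up. */
static enum event
halo_ping_decision(long latency_msec, long threshold_msec, int child_up,
                   int up_children, int halo_min, int halo_max)
{
        if (latency_msec > threshold_msec && child_up &&
            up_children > halo_min)
                return EV_CHILD_DOWN;
        if (latency_msec < threshold_msec && !child_up &&
            up_children < halo_max)
                return EV_CHILD_UP;
        return EV_NONE;
}

int
main(void)
{
        /* 25 ms ping against a 10 ms threshold on an up child, with
         * 3 of 3 children up and halo_min_replicas = 2: the child is
         * marked down (prints 2, EV_CHILD_DOWN). */
        printf("event = %d\n", halo_ping_decision(25, 10, 1, 3, 2, 3));
        return 0;
}
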
- local->fd = fd_ref (fd);
- local->cont.lk.cmd = cmd;
- local->cont.lk.user_flock = *flock;
- local->cont.lk.ret_flock = *flock;
+static int64_t
+afr_get_halo_latency(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ int64_t halo_max_latency_msec = 0;
+
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_lk_cbk, (void *) (long) 0,
- priv->children[i],
- priv->children[i]->fops->lk,
- fd, cmd, flock, xdata);
+ if (priv->shd.iamshd) {
+ halo_max_latency_msec = priv->shd.halo_max_latency_msec;
+ } else if (priv->nfsd.iamnfsd) {
+ halo_max_latency_msec = priv->nfsd.halo_max_latency_msec;
+ } else {
+ halo_max_latency_msec = priv->halo_max_latency_msec;
+ }
+ gf_msg_debug(this->name, 0, "Using halo latency %" PRId64,
+ halo_max_latency_msec);
+ return halo_max_latency_msec;
+}
- return 0;
+void
+__afr_handle_child_up_event(xlator_t *this, xlator_t *child_xlator,
+ const int idx, int64_t child_latency_msec,
+ int32_t *event, int32_t *call_psh,
+ int32_t *up_child)
+{
+ afr_private_t *priv = NULL;
+ int up_children = 0;
+ int worst_up_child = -1;
+ int64_t halo_max_latency_msec = afr_get_halo_latency(this);
+
+ priv = this->private;
+
+ /*
+ * This only really counts if the child was never up
+ * (value = -1) or had been down (value = 0). See
+ * comment at GF_EVENT_CHILD_DOWN for a more detailed
+ * explanation.
+ */
+ if (priv->child_up[idx] != 1) {
+ priv->event_generation++;
+ }
+ priv->child_up[idx] = 1;
+
+ *call_psh = 1;
+ *up_child = idx;
+ up_children = __afr_get_up_children_count(priv);
+ /*
+ * If this is an _actual_ CHILD_UP event, we
+ * want to set the child_latency to MAX to indicate
+ * the child needs ping data to be available before doing child-up
+ */
+ if (!priv->halo_enabled)
+ goto out;
+
+ if (child_latency_msec < 0) {
+ /*set to INT64_MAX-1 so that it is found for best_down_child*/
+ priv->halo_child_up[idx] = 1;
+ if (priv->child_latency[idx] < 0) {
+ priv->child_latency[idx] = AFR_HALO_MAX_LATENCY;
+ }
+ }
+
+ /*
+ * Handle the edge case where we exceed
+ * halo_min_replicas and we've got a child which is
+ * marked up as it was helping to satisfy the
+ * halo_min_replicas even though it's latency exceeds
+ * halo_max_latency_msec.
+ */
+ if (up_children > priv->halo_min_replicas) {
+ worst_up_child = find_worst_up_child(this);
+ if (worst_up_child >= 0 &&
+ priv->child_latency[worst_up_child] > halo_max_latency_msec) {
+ gf_msg_debug(this->name, 0,
+ "Marking child %d down, "
+ "doesn't meet halo threshold (%" PRId64
+ "), and > "
+ "halo_min_replicas (%d)",
+ worst_up_child, halo_max_latency_msec,
+ priv->halo_min_replicas);
+ priv->child_up[worst_up_child] = 0;
+ up_children--;
+ }
+ }
+
+ if (up_children > priv->halo_max_replicas && !priv->shd.iamshd) {
+ worst_up_child = find_worst_up_child(this);
+ if (worst_up_child < 0) {
+ worst_up_child = idx;
+ }
+ priv->child_up[worst_up_child] = 0;
+ up_children--;
+ gf_msg_debug(this->name, 0,
+ "Marking child %d down, "
+ "up_children (%d) > halo_max_replicas (%d)",
+ worst_up_child, up_children, priv->halo_max_replicas);
+ }
out:
- AFR_STACK_UNWIND (lk, frame, -1, op_errno, NULL, NULL);
+ if (up_children == 1) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SUBVOL_UP,
+ "Subvolume '%s' came back up; "
+ "going online.",
+ child_xlator->name);
+ gf_event(EVENT_AFR_SUBVOL_UP, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ } else {
+ *event = GF_EVENT_SOME_DESCENDENT_UP;
+ }
- return 0;
+ priv->last_event[idx] = *event;
}
-int
-afr_forget (xlator_t *this, inode_t *inode)
+void
+__afr_handle_child_down_event(xlator_t *this, xlator_t *child_xlator, int idx,
+ int64_t child_latency_msec, int32_t *event,
+ int32_t *call_psh, int32_t *up_child)
+{
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int up_children = 0;
+ int down_children = 0;
+ int best_down_child = -1;
+
+ priv = this->private;
+
+ /*
+ * If a brick is down when we start, we'll get a
+ * CHILD_DOWN to indicate its initial state. There
+ * was never a CHILD_UP in this case, so if we
+ * increment "down_count" the difference between that
+ * and "up_count" will no longer be the number of
+ * children that are currently up. This has serious
+ * implications e.g. for quorum enforcement, so we
+ * don't increment these values unless the event
+ * represents an actual state transition between "up"
+ * (value = 1) and anything else.
+ */
+ if (priv->child_up[idx] == 1) {
+ priv->event_generation++;
+ }
+
+ /*
+ * If this is an _actual_ CHILD_DOWN event, we
+ * want to set the child_latency to < 0 to indicate
+ * the child is really disconnected.
+ */
+ if (child_latency_msec < 0) {
+ priv->child_latency[idx] = child_latency_msec;
+ priv->halo_child_up[idx] = 0;
+ }
+ priv->child_up[idx] = 0;
+
+ up_children = __afr_get_up_children_count(priv);
+ /*
+ * Handle the edge case where we need to find the
+ * next best child (to mark up) as marking this child
+ * down would cause us to fall below halo_min_replicas.
+ * We will also force the SHD to heal this child _now_
+ * as we want it to be up to date if we are going to
+ * begin using it synchronously.
+ */
+ if (priv->halo_enabled && up_children < priv->halo_min_replicas) {
+ best_down_child = find_best_down_child(this);
+ if (best_down_child >= 0) {
+ gf_msg_debug(this->name, 0,
+ "Swapping out child %d for "
+ "child %d to satisfy halo_min_replicas (%d).",
+ idx, best_down_child, priv->halo_min_replicas);
+ priv->child_up[best_down_child] = 1;
+ *call_psh = 1;
+ *up_child = best_down_child;
+ }
+ }
+ for (i = 0; i < priv->child_count; i++)
+ if (priv->child_up[i] == 0)
+ down_children++;
+ if (down_children == priv->child_count) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SUBVOLS_DOWN,
+ "All subvolumes are down. Going "
+ "offline until at least one of them "
+ "comes back up.");
+ gf_event(EVENT_AFR_SUBVOLS_DOWN, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ } else {
+ *event = GF_EVENT_SOME_DESCENDENT_DOWN;
+ }
+ priv->last_event[idx] = *event;
+}
+
+void
+afr_ta_lock_release_synctask(xlator_t *this)
{
- uint64_t ctx_int = 0;
- afr_inode_ctx_t *ctx = NULL;
+ call_frame_t *ta_frame = NULL;
+ int ret = 0;
- afr_spb_choice_timeout_cancel (this, inode);
- inode_ctx_del (inode, this, &ctx_int);
- if (!ctx_int)
- return 0;
+ ta_frame = afr_ta_frame_create(this);
+ if (!ta_frame) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to create ta_frame");
+ return;
+ }
- ctx = (afr_inode_ctx_t *)ctx_int;
- GF_FREE (ctx);
- return 0;
+ ret = synctask_new(this->ctx->env, afr_release_notify_lock_for_ta,
+ afr_ta_lock_release_done, ta_frame, this);
+ if (ret) {
+ STACK_DESTROY(ta_frame->root);
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to release "
+ "AFR_TA_DOM_NOTIFY lock.");
+ }
}
-int
-afr_priv_dump (xlator_t *this)
+static void
+afr_handle_inodelk_contention(xlator_t *this, struct gf_upcall *upcall)
{
- afr_private_t *priv = NULL;
- char key_prefix[GF_DUMP_MAX_BUF_LEN];
- char key[GF_DUMP_MAX_BUF_LEN];
- int i = 0;
+ struct gf_upcall_inodelk_contention *lc = NULL;
+ unsigned int inmem_count = 0;
+ unsigned int onwire_count = 0;
+ afr_private_t *priv = this->private;
+ lc = upcall->data;
- GF_ASSERT (this);
- priv = this->private;
+ if (strcmp(lc->domain, AFR_TA_DOM_NOTIFY) != 0)
+ return;
- GF_ASSERT (priv);
- snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name);
- gf_proc_dump_add_section(key_prefix);
- gf_proc_dump_write("child_count", "%u", priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- sprintf (key, "child_up[%d]", i);
- gf_proc_dump_write(key, "%d", priv->child_up[i]);
- sprintf (key, "pending_key[%d]", i);
- gf_proc_dump_write(key, "%s", priv->pending_key[i]);
- }
- gf_proc_dump_write("data_self_heal", "%s", priv->data_self_heal);
- gf_proc_dump_write("metadata_self_heal", "%d", priv->metadata_self_heal);
- gf_proc_dump_write("entry_self_heal", "%d", priv->entry_self_heal);
- gf_proc_dump_write("data_change_log", "%d", priv->data_change_log);
- gf_proc_dump_write("metadata_change_log", "%d", priv->metadata_change_log);
- gf_proc_dump_write("entry-change_log", "%d", priv->entry_change_log);
- gf_proc_dump_write("read_child", "%d", priv->read_child);
- gf_proc_dump_write("favorite_child", "%d", priv->favorite_child);
- gf_proc_dump_write("wait_count", "%u", priv->wait_count);
- gf_proc_dump_write("quorum-reads", "%d", priv->quorum_reads);
+ if (priv->shd.iamshd) {
+ /* shd should ignore AFR_TA_DOM_NOTIFY release requests. */
+ return;
+ }
+ LOCK(&priv->lock);
+ {
+ if (priv->release_ta_notify_dom_lock == _gf_true) {
+ /* Ignore multiple release requests from shds.*/
+ UNLOCK(&priv->lock);
+ return;
+ }
+ priv->release_ta_notify_dom_lock = _gf_true;
+ inmem_count = priv->ta_in_mem_txn_count;
+ onwire_count = priv->ta_on_wire_txn_count;
+ }
+ UNLOCK(&priv->lock);
+ if (inmem_count || onwire_count)
+ /* lock release will happen in txn code path after
+ * in-memory or on-wire txns are over.*/
+ return;
- return 0;
+ afr_ta_lock_release_synctask(this);
}
+static void
+afr_handle_upcall_event(xlator_t *this, struct gf_upcall *upcall)
+{
+ struct gf_upcall_cache_invalidation *up_ci = NULL;
+ afr_private_t *priv = this->private;
+ inode_t *inode = NULL;
+ inode_table_t *itable = NULL;
+ int i = 0;
+
+ switch (upcall->event_type) {
+ case GF_UPCALL_INODELK_CONTENTION:
+ afr_handle_inodelk_contention(this, upcall);
+ break;
+ case GF_UPCALL_CACHE_INVALIDATION:
+ up_ci = (struct gf_upcall_cache_invalidation *)upcall->data;
+
+ /* Since md-cache will be aggressively filtering
+ * lookups, the stale-read issue becomes more
+ * pronounced. Hence, when a pending xattr is set, notify
+ * all the md-cache clients to invalidate their existing
+ * stat cache and send a fresh lookup next time */
+ if (!up_ci->dict)
+ break;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!dict_get(up_ci->dict, priv->pending_key[i]))
+ continue;
+ up_ci->flags |= UP_INVAL_ATTR;
+ itable = ((xlator_t *)this->graph->top)->itable;
+ /*Internal processes may not have itable for
+ *top xlator*/
+ if (itable)
+ inode = inode_find(itable, upcall->gfid);
+ if (inode)
+ afr_inode_need_refresh_set(inode, this);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
-/**
- * find_child_index - find the child's index in the array of subvolumes
- * @this: AFR
- * @child: child
- */
-
-static int
-find_child_index (xlator_t *this, xlator_t *child)
-{
- afr_private_t *priv = NULL;
- int i = -1;
-
- priv = this->private;
+int32_t
+afr_notify(xlator_t *this, int32_t event, void *data, void *data2)
+{
+ afr_private_t *priv = NULL;
+ xlator_t *child_xlator = NULL;
+ int i = -1;
+ int propagate = 0;
+ int had_heard_from_all = 0;
+ int have_heard_from_all = 0;
+ int idx = -1;
+ int ret = -1;
+ int call_psh = 0;
+ int up_child = -1;
+ dict_t *input = NULL;
+ dict_t *output = NULL;
+ gf_boolean_t had_quorum = _gf_false;
+ gf_boolean_t has_quorum = _gf_false;
+ int64_t halo_max_latency_msec = 0;
+ int64_t child_latency_msec = -1;
+
+ child_xlator = (xlator_t *)data;
+
+ priv = this->private;
+
+ if (!priv)
+ return 0;
- for (i = 0; i < priv->child_count; i++) {
- if ((xlator_t *) child == priv->children[i])
- break;
+ /*
+ * We need to reset this in case children come up in "staggered"
+ * fashion, so that we discover a late-arriving local subvolume. Note
+ * that we could end up issuing N lookups to the first subvolume, and
+ * O(N^2) overall, but N is small for AFR so it shouldn't be an issue.
+ */
+ priv->did_discovery = _gf_false;
+
+ /* parent xlators don't need to know about every child_up, child_down
+ * because of afr ha. If all subvolumes go down, child_down has
+ * to be triggered. In that state when 1 subvolume comes up child_up
+ * needs to be triggered. dht optimizes revalidate lookup by sending
+ * it only to one of its subvolumes. When child up/down happens
+ * for afr's subvolumes dht should be notified by child_modified. The
+ * subsequent revalidate lookup happens on all the dht's subvolumes
+ * which triggers afr self-heals if any.
+ */
+ idx = afr_find_child_index(this, child_xlator);
+ if (idx < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_CHILD_UP,
+ "Received child_up from invalid subvolume");
+ goto out;
+ }
+
+ had_quorum = priv->quorum_count &&
+ afr_has_quorum(priv->child_up, this, NULL);
+ if (event == GF_EVENT_CHILD_PING) {
+ child_latency_msec = (int64_t)(uintptr_t)data2;
+ if (priv->halo_enabled) {
+ halo_max_latency_msec = afr_get_halo_latency(this);
+
+ /* Calculates the child latency and sets event
+ */
+ LOCK(&priv->lock);
+ {
+ __afr_handle_ping_event(this, child_xlator, idx,
+ halo_max_latency_msec, &event,
+ child_latency_msec);
+ }
+ UNLOCK(&priv->lock);
+ } else {
+ LOCK(&priv->lock);
+ {
+ priv->child_latency[idx] = child_latency_msec;
+ }
+ UNLOCK(&priv->lock);
}
+ }
- return i;
-}
-
-int32_t
-afr_notify (xlator_t *this, int32_t event,
- void *data, void *data2)
-{
- afr_private_t *priv = NULL;
- int i = -1;
- int up_children = 0;
- int down_children = 0;
- int propagate = 0;
- int had_heard_from_all = 0;
- int have_heard_from_all = 0;
- int idx = -1;
- int ret = -1;
- int call_psh = 0;
- int up_child = -1;
- dict_t *input = NULL;
- dict_t *output = NULL;
- gf_boolean_t had_quorum = _gf_false;
- gf_boolean_t has_quorum = _gf_false;
-
- priv = this->private;
-
- if (!priv)
- return 0;
-
- /*
- * We need to reset this in case children come up in "staggered"
- * fashion, so that we discover a late-arriving local subvolume. Note
- * that we could end up issuing N lookups to the first subvolume, and
- * O(N^2) overall, but N is small for AFR so it shouldn't be an issue.
+ if (event == GF_EVENT_CHILD_PING) {
+ /* This is the only xlator that handles PING, no reason to
+ * propagate.
*/
- priv->did_discovery = _gf_false;
+ goto out;
+ }
- had_heard_from_all = 1;
- for (i = 0; i < priv->child_count; i++) {
- if (!priv->last_event[i]) {
- had_heard_from_all = 0;
- }
+ if (event == GF_EVENT_TRANSLATOR_OP) {
+ LOCK(&priv->lock);
+ {
+ had_heard_from_all = __get_heard_from_all_status(this);
}
+ UNLOCK(&priv->lock);
- /* parent xlators dont need to know about every child_up, child_down
- * because of afr ha. If all subvolumes go down, child_down has
- * to be triggered. In that state when 1 subvolume comes up child_up
- * needs to be triggered. dht optimizes revalidate lookup by sending
- * it only to one of its subvolumes. When child up/down happens
- * for afr's subvolumes dht should be notified by child_modified. The
- * subsequent revalidate lookup happens on all the dht's subvolumes
- * which triggers afr self-heals if any.
- */
- idx = find_child_index (this, data);
- if (idx < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_CHILD_UP,
- "Received child_up from invalid subvolume");
- goto out;
+ if (!had_heard_from_all) {
+ ret = -1;
+ } else {
+ input = data;
+ output = data2;
+ ret = afr_xl_op(this, input, output);
}
+ goto out;
+ }
- had_quorum = priv->quorum_count && afr_has_quorum (priv->child_up,
- this);
+ if (event == GF_EVENT_UPCALL) {
+ afr_handle_upcall_event(this, data);
+ }
+
+ LOCK(&priv->lock);
+ {
+ had_heard_from_all = __get_heard_from_all_status(this);
switch (event) {
- case GF_EVENT_CHILD_UP:
- LOCK (&priv->lock);
- {
- /*
- * This only really counts if the child was never up
- * (value = -1) or had been down (value = 0). See
- * comment at GF_EVENT_CHILD_DOWN for a more detailed
- * explanation.
- */
- if (priv->child_up[idx] != 1) {
- priv->up_count++;
- priv->event_generation++;
- }
- priv->child_up[idx] = 1;
-
- call_psh = 1;
- up_child = idx;
- for (i = 0; i < priv->child_count; i++)
- if (priv->child_up[i] == 1)
- up_children++;
- if (up_children == 1) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_SUBVOL_UP,
- "Subvolume '%s' came back up; "
- "going online.", ((xlator_t *)data)->name);
- } else {
- event = GF_EVENT_CHILD_MODIFIED;
- }
-
- priv->last_event[idx] = event;
+ case GF_EVENT_PARENT_UP:
+ __afr_launch_notify_timer(this, priv);
+ propagate = 1;
+ break;
+ case GF_EVENT_CHILD_UP:
+ if (priv->thin_arbiter_count &&
+ (idx == AFR_CHILD_THIN_ARBITER)) {
+ priv->ta_child_up = 1;
+ priv->ta_event_gen++;
+ break;
}
- UNLOCK (&priv->lock);
-
+ __afr_handle_child_up_event(this, child_xlator, idx,
+ child_latency_msec, &event,
+ &call_psh, &up_child);
+ __afr_lock_heal_synctask(this, priv, idx);
break;
- case GF_EVENT_CHILD_DOWN:
- LOCK (&priv->lock);
- {
- /*
- * If a brick is down when we start, we'll get a
- * CHILD_DOWN to indicate its initial state. There
- * was never a CHILD_UP in this case, so if we
- * increment "down_count" the difference between than
- * and "up_count" will no longer be the number of
- * children that are currently up. This has serious
- * implications e.g. for quorum enforcement, so we
- * don't increment these values unless the event
- * represents an actual state transition between "up"
- * (value = 1) and anything else.
- */
- if (priv->child_up[idx] == 1) {
- priv->down_count++;
- priv->event_generation++;
- }
- priv->child_up[idx] = 0;
-
- for (i = 0; i < priv->child_count; i++)
- if (priv->child_up[i] == 0)
- down_children++;
- if (down_children == priv->child_count) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_ALL_SUBVOLS_DOWN,
- "All subvolumes are down. Going offline "
- "until atleast one of them comes back up.");
- } else {
- event = GF_EVENT_CHILD_MODIFIED;
- }
-
- priv->last_event[idx] = event;
+ case GF_EVENT_CHILD_DOWN:
+ if (priv->thin_arbiter_count &&
+ (idx == AFR_CHILD_THIN_ARBITER)) {
+ priv->ta_child_up = 0;
+ priv->ta_event_gen++;
+ afr_ta_locked_priv_invalidate(priv);
+ break;
}
- UNLOCK (&priv->lock);
-
+ __afr_handle_child_down_event(this, child_xlator, idx,
+ child_latency_msec, &event,
+ &call_psh, &up_child);
+ __afr_mark_pending_lk_heal(this, priv, idx);
break;
- case GF_EVENT_CHILD_CONNECTING:
- LOCK (&priv->lock);
- {
- priv->last_event[idx] = event;
- }
- UNLOCK (&priv->lock);
+ case GF_EVENT_CHILD_CONNECTING:
+ priv->last_event[idx] = event;
break;
- case GF_EVENT_TRANSLATOR_OP:
- input = data;
- output = data2;
- if (!had_heard_from_all) {
- ret = -1;
- goto out;
- }
- ret = afr_xl_op (this, input, output);
- goto out;
+ case GF_EVENT_SOME_DESCENDENT_DOWN:
+ priv->last_event[idx] = event;
break;
-
- default:
+ default:
propagate = 1;
break;
}
-
- if (priv->quorum_count) {
- has_quorum = afr_has_quorum (priv->child_up, this);
- if (!had_quorum && has_quorum)
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_QUORUM_MET,
- "Client-quorum is met");
- if (had_quorum && !has_quorum)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_QUORUM_FAIL,
- "Client-quorum is not met");
- }
-
- /* have all subvolumes reported status once by now? */
- have_heard_from_all = 1;
- for (i = 0; i < priv->child_count; i++) {
- if (!priv->last_event[i])
- have_heard_from_all = 0;
- }
-
- /* if all subvols have reported status, no need to hide anything
- or wait for anything else. Just propagate blindly */
- if (have_heard_from_all)
- propagate = 1;
-
+ have_heard_from_all = __get_heard_from_all_status(this);
if (!had_heard_from_all && have_heard_from_all) {
- /* This is the first event which completes aggregation
- of events from all subvolumes. If at least one subvol
- had come up, propagate CHILD_UP, but only this time
- */
- event = GF_EVENT_CHILD_DOWN;
-
- LOCK (&priv->lock);
- {
- up_children = AFR_COUNT (priv->child_up, priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- if (priv->last_event[i] == GF_EVENT_CHILD_UP) {
- event = GF_EVENT_CHILD_UP;
- break;
- }
-
- if (priv->last_event[i] ==
- GF_EVENT_CHILD_CONNECTING) {
- event = GF_EVENT_CHILD_CONNECTING;
- /* continue to check other events for CHILD_UP */
- }
- }
+ if (priv->timer) {
+ gf_timer_call_cancel(this->ctx, priv->timer);
+ priv->timer = NULL;
+ }
+ /* This is the first event that completes aggregation
+ of events from all subvolumes. If at least one subvol
+ had come up, propagate CHILD_UP, but only this once.
+ */
+ event = GF_EVENT_CHILD_DOWN;
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->last_event[i] == GF_EVENT_CHILD_UP) {
+ event = GF_EVENT_CHILD_UP;
+ break;
}
- UNLOCK (&priv->lock);
- }
-
- ret = 0;
- if (propagate)
- ret = default_notify (this, event, data);
- if (!had_heard_from_all && have_heard_from_all && priv->shd.iamshd) {
- /*
- * Since self-heal is supposed to be launched only after
- * the responses from all the bricks are collected,
- * launch self-heals now on all up subvols.
- */
- for (i = 0; i < priv->child_count; i++)
- if (priv->child_up[i])
- afr_selfheal_childup (this, i);
- } else if (have_heard_from_all && call_psh && priv->shd.iamshd) {
- /*
- * Already heard from everyone. Just launch heal on now up
- * subvolume.
- */
- afr_selfheal_childup (this, up_child);
- }
+ if (priv->last_event[i] == GF_EVENT_CHILD_CONNECTING) {
+ event = GF_EVENT_CHILD_CONNECTING;
+ /* continue to check other events for CHILD_UP */
+ }
+ }
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (priv->quorum_count) {
+ has_quorum = afr_has_quorum(priv->child_up, this, NULL);
+ if (!had_quorum && has_quorum) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_QUORUM_MET,
+ "Client-quorum is met");
+ gf_event(EVENT_AFR_QUORUM_MET, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ }
+ if (had_quorum && !has_quorum) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_QUORUM_FAIL,
+ "Client-quorum is not met");
+ gf_event(EVENT_AFR_QUORUM_FAIL, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ }
+ }
+
+ /* If all subvols have reported their status, there is no need to
+ hide anything or wait for anything else; just propagate blindly. */
+ if (have_heard_from_all)
+ propagate = 1;
+
+ ret = 0;
+ if (propagate)
+ ret = default_notify(this, event, data);
+
+ if ((!had_heard_from_all) || call_psh) {
+ /* Launch self-heal on all local subvolumes if:
+ * a) we have heard from all subvolumes for the first time, or
+ * b) we had already heard from everyone and just received a
+ * child-up event.
+ */
+ if (have_heard_from_all) {
+ afr_selfheal_childup(this, priv);
+ }
+ }
out:
- return ret;
+ return ret;
}
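+/* Initialise an afr_local_t for a new fop from the current private state:
+ * seed op_ret/op_errno, copy child_up, allocate the read_attempted,
+ * readable and replies arrays, and snapshot the thin-arbiter state when
+ * one is configured. Fails with ENOTCONN if no children are up; returns
+ * 0 on success, -1 on failure with *op_errno set when provided. */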
+int
+afr_local_init(afr_local_t *local, afr_private_t *priv, int32_t *op_errno)
+{
+ int __ret = -1;
+ local->op_ret = -1;
+ local->op_errno = EUCLEAN;
+
+ __ret = syncbarrier_init(&local->barrier);
+ if (__ret) {
+ if (op_errno)
+ *op_errno = __ret;
+ goto out;
+ }
+
+ local->child_up = GF_MALLOC(priv->child_count * sizeof(*local->child_up),
+ gf_afr_mt_char);
+ if (!local->child_up) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ memcpy(local->child_up, priv->child_up,
+ sizeof(*local->child_up) * priv->child_count);
+ local->call_count = AFR_COUNT(local->child_up, priv->child_count);
+ if (local->call_count == 0) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, AFR_MSG_SUBVOLS_DOWN,
+ "no subvolumes up");
+ if (op_errno)
+ *op_errno = ENOTCONN;
+ goto out;
+ }
+
+ local->event_generation = priv->event_generation;
+
+ local->read_attempted = GF_CALLOC(priv->child_count, sizeof(char),
+ gf_afr_mt_char);
+ if (!local->read_attempted) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->readable = GF_CALLOC(priv->child_count, sizeof(char),
+ gf_afr_mt_char);
+ if (!local->readable) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->readable2 = GF_CALLOC(priv->child_count, sizeof(char),
+ gf_afr_mt_char);
+ if (!local->readable2) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->read_subvol = -1;
+
+ local->replies = GF_CALLOC(priv->child_count, sizeof(*local->replies),
+ gf_afr_mt_reply_t);
+ if (!local->replies) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->need_full_crawl = _gf_false;
+ if (priv->thin_arbiter_count) {
+ local->ta_child_up = priv->ta_child_up;
+ local->ta_failed_subvol = AFR_CHILD_UNKNOWN;
+ local->read_txn_query_child = AFR_CHILD_UNKNOWN;
+ local->ta_event_gen = priv->ta_event_gen;
+ local->fop_state = TA_SUCCESS;
+ }
+ local->is_new_entry = _gf_false;
+
+ INIT_LIST_HEAD(&local->healer);
+ return 0;
+out:
+ return -1;
+}
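+/* Allocate the per-lock bookkeeping (lower_locked_nodes) for an internal
+ * lock spanning child_count subvolumes. Returns 0 on success, -ENOMEM on
+ * allocation failure. */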
int
-afr_local_init (afr_local_t *local, afr_private_t *priv, int32_t *op_errno)
+afr_internal_lock_init(afr_internal_lock_t *lk, size_t child_count)
{
- local->op_ret = -1;
- local->op_errno = EUCLEAN;
+ int ret = -ENOMEM;
- syncbarrier_init (&local->barrier);
+ lk->lower_locked_nodes = GF_CALLOC(sizeof(*lk->lower_locked_nodes),
+ child_count, gf_afr_mt_char);
+ if (NULL == lk->lower_locked_nodes)
+ goto out;
- local->child_up = GF_CALLOC (priv->child_count,
- sizeof (*local->child_up),
- gf_afr_mt_char);
- if (!local->child_up) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
+ lk->lock_op_ret = -1;
+ lk->lock_op_errno = EUCLEAN;
- memcpy (local->child_up, priv->child_up,
- sizeof (*local->child_up) * priv->child_count);
- local->call_count = AFR_COUNT (local->child_up, priv->child_count);
- if (local->call_count == 0) {
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- AFR_MSG_ALL_SUBVOLS_DOWN, "no subvolumes up");
- if (op_errno)
- *op_errno = ENOTCONN;
- goto out;
- }
- local->event_generation = priv->event_generation;
-
- local->read_attempted = GF_CALLOC (priv->child_count, sizeof (char),
- gf_afr_mt_char);
- if (!local->read_attempted) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
-
- local->readable = GF_CALLOC (priv->child_count, sizeof (char),
- gf_afr_mt_char);
- if (!local->readable) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
-
- local->replies = GF_CALLOC(priv->child_count, sizeof(*local->replies),
- gf_afr_mt_reply_t);
- if (!local->replies) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
-
- return 0;
+ ret = 0;
out:
- return -1;
+ return ret;
}
-int
-afr_internal_lock_init (afr_internal_lock_t *lk, size_t child_count,
- transaction_lk_type_t lk_type)
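+/* Free an m-row matrix previously allocated by afr_matrix_create();
+ * a NULL matrix is ignored. */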
+void
+afr_matrix_cleanup(int32_t **matrix, unsigned int m)
{
- int ret = -ENOMEM;
+ int i = 0;
- lk->locked_nodes = GF_CALLOC (sizeof (*lk->locked_nodes),
- child_count, gf_afr_mt_char);
- if (NULL == lk->locked_nodes)
- goto out;
+ if (!matrix)
+ goto out;
+ for (i = 0; i < m; i++) {
+ GF_FREE(matrix[i]);
+ }
- lk->lower_locked_nodes = GF_CALLOC (sizeof (*lk->lower_locked_nodes),
- child_count, gf_afr_mt_char);
- if (NULL == lk->lower_locked_nodes)
- goto out;
+ GF_FREE(matrix);
+out:
+ return;
+}
- lk->lock_op_ret = -1;
- lk->lock_op_errno = EUCLEAN;
- lk->transaction_lk_type = lk_type;
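+/* Allocate an m x n matrix of int32_t, zero-initialised; on failure any
+ * partial allocation is freed and NULL is returned. */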
+int32_t **
+afr_matrix_create(unsigned int m, unsigned int n)
+{
+ int32_t **matrix = NULL;
+ int i = 0;
- ret = 0;
+ matrix = GF_CALLOC(sizeof(*matrix), m, gf_afr_mt_int32_t);
+ if (!matrix)
+ goto out;
+
+ for (i = 0; i < m; i++) {
+ matrix[i] = GF_CALLOC(sizeof(*matrix[i]), n, gf_afr_mt_int32_t);
+ if (!matrix[i])
+ goto out;
+ }
+ return matrix;
out:
- return ret;
+ afr_matrix_cleanup(matrix, m);
+ return NULL;
+}
+
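+/* Set up the transaction-specific parts of afr_local_t: the internal
+ * lock, the pre-op/changelog/failed-subvol arrays and the pending
+ * changelog matrix. Returns 0 on success, a negative errno on failure. */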
+int
+afr_transaction_local_init(afr_local_t *local, xlator_t *this)
+{
+ int ret = -ENOMEM;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ INIT_LIST_HEAD(&local->transaction.wait_list);
+ INIT_LIST_HEAD(&local->transaction.owner_list);
+ INIT_LIST_HEAD(&local->ta_waitq);
+ INIT_LIST_HEAD(&local->ta_onwireq);
+ ret = afr_internal_lock_init(&local->internal_lock, priv->child_count);
+ if (ret < 0)
+ goto out;
+
+ ret = -ENOMEM;
+ local->pre_op_compat = priv->pre_op_compat;
+
+ local->transaction.pre_op = GF_CALLOC(sizeof(*local->transaction.pre_op),
+ priv->child_count, gf_afr_mt_char);
+ if (!local->transaction.pre_op)
+ goto out;
+
+ local->transaction.changelog_xdata = GF_CALLOC(
+ sizeof(*local->transaction.changelog_xdata), priv->child_count,
+ gf_afr_mt_dict_t);
+ if (!local->transaction.changelog_xdata)
+ goto out;
+
+ if (priv->arbiter_count == 1) {
+ local->transaction.pre_op_sources = GF_CALLOC(
+ sizeof(*local->transaction.pre_op_sources), priv->child_count,
+ gf_afr_mt_char);
+ if (!local->transaction.pre_op_sources)
+ goto out;
+ }
+
+ local->transaction.failed_subvols = GF_CALLOC(
+ sizeof(*local->transaction.failed_subvols), priv->child_count,
+ gf_afr_mt_char);
+ if (!local->transaction.failed_subvols)
+ goto out;
+
+ local->pending = afr_matrix_create(priv->child_count, AFR_NUM_CHANGE_LOGS);
+ if (!local->pending)
+ goto out;
+
+ ret = 0;
+out:
+ return ret;
}
void
-afr_matrix_cleanup (int32_t **matrix, unsigned int m)
+afr_set_low_priority(call_frame_t *frame)
{
- int i = 0;
-
- if (!matrix)
- goto out;
- for (i = 0; i < m; i++) {
- GF_FREE (matrix[i]);
- }
+ frame->root->pid = LOW_PRIO_PROC_PID;
+}
- GF_FREE (matrix);
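+/* Release everything owned by an afr_private_t (per-child arrays, pending
+ * keys, the lock) and then the structure itself; a NULL priv is ignored. */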
+void
+afr_priv_destroy(afr_private_t *priv)
+{
+ int i = 0;
+ int child_count = -1;
+
+ if (!priv)
+ goto out;
+
+ GF_FREE(priv->sh_domain);
+ GF_FREE(priv->last_event);
+
+ child_count = priv->child_count;
+ if (priv->thin_arbiter_count) {
+ child_count++;
+ }
+ if (priv->pending_key) {
+ for (i = 0; i < child_count; i++)
+ GF_FREE(priv->pending_key[i]);
+ }
+
+ GF_FREE(priv->pending_reads);
+ GF_FREE(priv->local);
+ GF_FREE(priv->pending_key);
+ GF_FREE(priv->children);
+ GF_FREE(priv->anon_inode);
+ GF_FREE(priv->child_up);
+ GF_FREE(priv->halo_child_up);
+ GF_FREE(priv->child_latency);
+ LOCK_DESTROY(&priv->lock);
+
+ GF_FREE(priv);
out:
- return;
+ return;
}
-int32_t**
-afr_matrix_create (unsigned int m, unsigned int n)
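+/* Build a pending-changelog matrix for the children flagged in 'pending',
+ * marking the metadata changelog and the changelog matching the entry's
+ * ia_type, and store it in 'xattr' via afr_set_pending_dict(). Returns the
+ * matrix, or NULL on failure. */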
+int **
+afr_mark_pending_changelog(afr_private_t *priv, unsigned char *pending,
+ dict_t *xattr, ia_type_t iat)
{
- int32_t **matrix = NULL;
- int i = 0;
+ int i = 0;
+ int **changelog = NULL;
+ int idx = -1;
+ int m_idx = 0;
+ int d_idx = 0;
+ int ret = 0;
- matrix = GF_CALLOC (sizeof (*matrix), m, gf_afr_mt_int32_t);
- if (!matrix)
- goto out;
+ m_idx = afr_index_for_transaction_type(AFR_METADATA_TRANSACTION);
+ d_idx = afr_index_for_transaction_type(AFR_DATA_TRANSACTION);
- for (i = 0; i < m; i++) {
- matrix[i] = GF_CALLOC (sizeof (*matrix[i]), n,
- gf_afr_mt_int32_t);
- if (!matrix[i])
- goto out;
- }
- return matrix;
-out:
- afr_matrix_cleanup (matrix, m);
+ idx = afr_index_from_ia_type(iat);
+
+ changelog = afr_matrix_create(priv->child_count, AFR_NUM_CHANGE_LOGS);
+ if (!changelog)
+ goto out;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!pending[i])
+ continue;
+
+ changelog[i][m_idx] = hton32(1);
+ if (idx != -1)
+ changelog[i][idx] = hton32(1);
+ /* If the newentry marking is on a newly created directory,
+ * then mark it with the full-heal indicator.
+ */
+ if ((IA_ISDIR(iat)) && (priv->esh_granular))
+ changelog[i][d_idx] = hton32(1);
+ }
+ ret = afr_set_pending_dict(priv, xattr, changelog);
+ if (ret < 0) {
+ afr_matrix_cleanup(changelog, priv->child_count);
return NULL;
+ }
+out:
+ return changelog;
}
-int
-afr_inodelk_init (afr_inodelk_t *lk, char *dom, size_t child_count)
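+/* Wrap the given heal status string in a new dict under the "heal-info"
+ * key. Returns the dict, or NULL on failure (status is freed on the error
+ * paths that do not consume it). */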
+static dict_t *
+afr_set_heal_info(char *status)
{
- int ret = -ENOMEM;
+ dict_t *dict = NULL;
+ int ret = -1;
- lk->domain = dom;
- lk->locked_nodes = GF_CALLOC (sizeof (*lk->locked_nodes),
- child_count, gf_afr_mt_char);
- if (NULL == lk->locked_nodes)
- goto out;
- ret = 0;
+ dict = dict_new();
+ if (!dict) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = dict_set_dynstr_sizen(dict, "heal-info", status);
+ if (ret)
+ gf_msg("", GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Failed to set heal-info key to "
+ "%s",
+ status);
out:
- return ret;
+ /* For any error other than EINVAL, dict_set_dynstr frees status itself;
+ free it here only on ENOMEM (dict_new failed) or EINVAL. */
+ if (ret == -ENOMEM || ret == -EINVAL) {
+ GF_FREE(status);
+ }
+
+ if (ret && dict) {
+ dict_unref(dict);
+ dict = NULL;
+ }
+ return dict;
}
-int
-afr_transaction_local_init (afr_local_t *local, xlator_t *this)
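+/* Return true if any child's dirty xattr count for the given transaction
+ * type is greater than one. */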
+static gf_boolean_t
+afr_is_dirty_count_non_unary_for_txn(xlator_t *this, struct afr_reply *replies,
+ afr_transaction_type type)
{
- int child_up_count = 0;
- int ret = -ENOMEM;
- afr_private_t *priv = NULL;
+ afr_private_t *priv = this->private;
+ int *dirty = alloca0(priv->child_count * sizeof(int));
+ int i = 0;
- priv = this->private;
- ret = afr_internal_lock_init (&local->internal_lock, priv->child_count,
- AFR_TRANSACTION_LK);
- if (ret < 0)
- goto out;
+ afr_selfheal_extract_xattr(this, replies, type, dirty, NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (dirty[i] > 1)
+ return _gf_true;
+ }
- if ((local->transaction.type == AFR_DATA_TRANSACTION) ||
- (local->transaction.type == AFR_METADATA_TRANSACTION)) {
- ret = afr_inodelk_init (&local->internal_lock.inodelk[0],
- this->name, priv->child_count);
- if (ret < 0)
- goto out;
- }
+ return _gf_false;