#Iobuf-pool
##Datastructures
###iobuf
Short for IO buffer. An iobuf is the unit of allocation for consumers of the
IOBUF API; each unit hosts @page_size (defined in the arena structure) bytes of
memory. As an initial step of processing a fop, the IO buffer handed to
GlusterFS by other applications (FUSE VFS / applications using gfapi) is copied
into GlusterFS space, i.e. into iobufs. Hence iobufs are mostly
allocated/deallocated in the FUSE, gfapi and protocol xlators, and also in
performance xlators to cache IO buffers.
```
struct iobuf {
        union {
                struct list_head      list;
                struct {
                        struct iobuf *next;
                        struct iobuf *prev;
                };
        };
        struct iobuf_arena  *iobuf_arena;

        gf_lock_t            lock; /* for ->ptr and ->ref */
        int                  ref;  /* 0 == passive, >0 == active */

        void                *ptr;  /* usable memory region by the consumer */

        void                *free_ptr; /* in case of stdalloc, this is the
                                          one to be freed not the *ptr */
};
```

###iobref
A single fop may need multiple iobufs, as in vectored read/write. Hence
multiple iobufs (16 by default) are encapsulated under one iobref.
```
struct iobref {
        gf_lock_t          lock;
        int                ref;
        struct iobuf     **iobrefs; /* list of iobufs */
        int                alloced; /* 16 by default, grows as required */
        int                used;    /* number of iobufs added to this iobref */
};
```
###iobuf_arenas
One region of memory mmap'd from the operating system. Each region maps
@arena_size bytes of memory and hosts @arena_size / @page_size iobufs.
Iobufs of the same size are grouped into one arena, for sanity of access.

```
struct iobuf_arena {
        union {
                struct list_head            list;
                struct {
                        struct iobuf_arena *next;
                        struct iobuf_arena *prev;
                };
        };

        size_t              page_size;  /* size of all iobufs in this arena */
        size_t              arena_size; /* this is equal to
                                           (iobuf_pool->arena_size / page_size)
                                           * page_size */
        size_t              page_count;

        struct iobuf_pool  *iobuf_pool;

        void               *mem_base;
        struct iobuf       *iobufs;     /* allocated iobufs list */

        int                 active_cnt;
        struct iobuf        active;     /* head node iobuf
                                           (unused by itself) */
        int                 passive_cnt;
        struct iobuf        passive;    /* head node iobuf
                                           (unused by itself) */
        uint64_t            alloc_cnt;  /* total allocs in this pool */
        int                 max_active; /* max active buffers at a given time */
};

```
###iobuf_pool
Pool of iobufs. Since the filesystem may require many IO buffers, a pool of
iobufs is preallocated and kept; only when the preallocated ones are exhausted
is the standard malloc/free path used, which improves performance. There is
generally one iobuf pool per process, allocated during glusterfs_ctx_t init
(glusterfs_ctx_defaults_init); currently the preallocated iobuf pool memory is
freed only on process exit. The iobuf pool is globally accessible across
GlusterFS, hence iobufs allocated by one xlator can be accessed by any other
xlator (provided the iobuf is passed along).
```
struct iobuf_pool {
        pthread_mutex_t     mutex;
        size_t              arena_size; /* size of memory region in
                                           arena */
        size_t              default_page_size; /* default size of iobuf */

        int                 arena_cnt;
        struct list_head    arenas[GF_VARIABLE_IOBUF_COUNT];
        /* array of arenas. Each element of the array is a list of arenas
           holding iobufs of particular page_size */

        struct list_head    filled[GF_VARIABLE_IOBUF_COUNT];
        /* array of arenas without free iobufs */

        struct list_head    purge[GF_VARIABLE_IOBUF_COUNT];
        /* array of of arenas which can be purged */

        uint64_t            request_misses; /* mostly the requests for higher
                                               value of iobufs */
};
```
~~~
The default size of the iobuf_pool (at the time of writing):
1024 iobufs of 128Bytes = 128KB
512  iobufs of 512Bytes = 256KB
512  iobufs of 2KB      = 1MB
128  iobufs of 8KB      = 1MB
64   iobufs of 32KB     = 2MB
32   iobufs of 128KB    = 4MB
8    iobufs of 256KB    = 2MB
2    iobufs of 1MB      = 2MB
Total ~13MB
~~~
As seen in the data structure, iobuf_pool has three arena lists.

- arenas:  
The arenas allocated during iobuf_pool creation are part of this list. This
list also contains partially filled arenas, i.e. arenas with some active and
some passive iobufs (passive_cnt != 0, active_cnt != 0, except for the
initially allocated arenas). By default there are 8 arenas, of the sizes
mentioned above.
- filled:  
If all the iobufs in an arena are in use (passive_cnt = 0), the arena is moved
to the filled list. If any iobuf of a filled arena is released via iobuf_put,
the arena moves back to the 'arenas' list.
- purge:  
If there are no active iobufs in an arena (active_cnt = 0), the arena is moved
to the purge list. iobuf_put() triggers destruction of the arenas in this list.
An arena in the purge list is destroyed only if there is at least one arena of
the same page size in the 'arenas' list; this avoids spurious mmap/munmap of
buffers. (e.g.: an arena (page_size=128KB, count=32) in the purge list is
destroyed (munmap'd) only if there is an arena with page_size=128KB in the
'arenas' list.) A sketch of these transitions is shown below.
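
A minimal sketch of the list-selection rule described above, assuming it can be
expressed purely in terms of an arena's active/passive counters; the enum and
helper names below are illustrative, not part of the real implementation:
```
/* Illustrative only: decide which iobuf_pool list an arena belongs to
 * after an iobuf is handed out (iobuf_get) or released (iobuf_put). */
enum arena_list_id { LIST_ARENAS, LIST_FILLED, LIST_PURGE };

static enum arena_list_id
arena_list_for(int active_cnt, int passive_cnt)
{
        if (active_cnt == 0)
                return LIST_PURGE;   /* nothing in use: candidate for munmap   */
        if (passive_cnt == 0)
                return LIST_FILLED;  /* everything in use: nothing to hand out */
        return LIST_ARENAS;          /* partially used: still serves iobuf_get */
}
```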

##APIs
###iobuf_get

```
struct iobuf *iobuf_get (struct iobuf_pool *iobuf_pool);
```
Creates a new iobuf of the default page size (128KB, hard coded at present).
It also takes a reference (increments the ref count), so there is no need to
take one explicitly after getting the iobuf.
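
A minimal usage sketch, assuming the caller already has the process-wide pool
(e.g. from glusterfs_ctx_t) and that the header is reachable as
<glusterfs/iobuf.h> (the include path differs across releases); copy_into_iobuf
is a hypothetical helper:
```
#include <errno.h>
#include <string.h>
#include <glusterfs/iobuf.h>  /* assumed path; older trees use "iobuf.h" */

/* Copy a payload into a freshly obtained iobuf, use it, then drop the ref. */
static int
copy_into_iobuf(struct iobuf_pool *pool, const char *payload, size_t len)
{
        struct iobuf *iobuf = iobuf_get(pool);  /* ref already taken for us */
        if (!iobuf)
                return -ENOMEM;

        /* ->ptr is the memory region usable by the consumer; len must not
         * exceed the default page size. */
        memcpy(iobuf->ptr, payload, len);

        /* ... hand the buffer off, e.g. via an iobref in a fop ... */

        iobuf_unref(iobuf);   /* matches the implicit ref from iobuf_get */
        return 0;
}
```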

###iobuf_get2

```
struct iobuf * iobuf_get2 (struct iobuf_pool *iobuf_pool, size_t page_size);
```
Creates a new iobuf of the specified page size; if page_size is 0, the default
page size is used.
```
if (requested iobuf size > max iobuf size in the pool (1MB at present))
  {
     Perform a standard allocation (CALLOC) of the requested size and
     add it to the list iobuf_pool->arenas[IOBUF_ARENA_MAX_INDEX].
  }
  else
  {
     - Round the requested size up to the nearest standard size in the
       iobuf pool (e.g. a request for 3KB is rounded to 8KB).
     - Select the arena list corresponding to the rounded size
       (e.g. the 8KB arena list).
     - If the selected arena has passive count > 0, return an iobuf
       from this arena and update the counters (passive/active/etc.)
       appropriately.
     - Otherwise the arena is full; allocate a new arena of the rounded
       size with the standard number of pages and add it to the arena
       list (e.g. 128 iobufs of 8KB are allocated).
  }
```
It also takes a reference (increments the ref count), so there is no need to
take one explicitly after getting the iobuf.
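
A hedged sketch of requesting a buffer sized to the caller's need, e.g. for a
read reply; prepare_read_reply is a hypothetical helper and the include path is
an assumption:
```
#include <errno.h>
#include <glusterfs/iobuf.h>   /* assumed include path */

static int
prepare_read_reply(struct iobuf_pool *pool, size_t count, struct iobuf **out)
{
        /* Pass the exact byte count; the pool rounds it up to a standard
         * page size (e.g. 3KB -> 8KB), or falls back to a standard
         * allocation for requests larger than the biggest page size. */
        struct iobuf *iobuf = iobuf_get2(pool, count);
        if (!iobuf)
                return -ENOMEM;

        *out = iobuf;  /* caller must iobuf_unref() once the reply is sent */
        return 0;
}
```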

###iobuf_ref

```
struct iobuf *iobuf_ref (struct iobuf *iobuf);
```
Takes a reference on the iobuf. When using an iobuf allocated by some other
xlator/function, it is good practice to take a reference so that the iobuf is
not freed from under you by the allocator.

###iobuf_unref
```
void iobuf_unref (struct iobuf *iobuf);
```
Drops a reference on the iobuf; if the ref count reaches zero the iobuf is
considered free.

```
  - If the iobuf was allocated via standard alloc, free it and return.
  - Update the active/passive counts appropriately.
  - If passive count > 0, add the arena to the 'arenas' list.
  - If active count = 0, add the arena to the 'purge' list.
```
Every iobuf_ref should have a corresponding iobuf_unref, and every
iobuf_get/iobuf_get2 should also have a corresponding iobuf_unref.
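
A hedged sketch of the pairing rule when holding on to a borrowed iobuf beyond
the current call; struct cached_block and both helpers are hypothetical:
```
#include <stddef.h>
#include <glusterfs/iobuf.h>   /* assumed include path */

struct cached_block {           /* hypothetical cache entry */
        struct iobuf *iobuf;
};

/* Keep somebody else's iobuf alive past the current call. */
static void
cache_store(struct cached_block *blk, struct iobuf *iobuf)
{
        blk->iobuf = iobuf_ref(iobuf);   /* take our own reference */
}

static void
cache_drop(struct cached_block *blk)
{
        iobuf_unref(blk->iobuf);         /* matches the ref from cache_store */
        blk->iobuf = NULL;
}
```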

###iobref_new
```
struct iobref *iobref_new ();
```
Creates a new iobref structure and returns its pointer.

###iobref_ref
```
struct iobref *iobref_ref (struct iobref *iobref);
```
Takes a reference on the iobref.

###iobref_unref
```
void iobref_unref (struct iobref *iobref);
```
Decrements the reference count of the iobref. If the ref count reaches 0, all
the iobufs in it are unreferenced (iobuf_unref) and the iobref is destroyed.

###iobref_add
```
int iobref_add (struct iobref *iobref, struct iobuf *iobuf);
```
Adds the given iobuf to the iobref. It takes a ref on the iobuf before adding
it, hence an explicit iobuf_ref is not required when adding to an iobref.
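
A hedged sketch of bundling a freshly obtained iobuf into an iobref, e.g. for a
vectored write path; build_single_buf_iobref is a hypothetical helper and the
include path is an assumption:
```
#include <errno.h>
#include <stddef.h>
#include <glusterfs/iobuf.h>   /* assumed include path */

static int
build_single_buf_iobref(struct iobuf_pool *pool, size_t size,
                        struct iobref **out)
{
        struct iobuf *iobuf = NULL;
        struct iobref *iobref = iobref_new();

        if (!iobref)
                return -ENOMEM;

        iobuf = iobuf_get2(pool, size);
        if (!iobuf) {
                iobref_unref(iobref);
                return -ENOMEM;
        }

        if (iobref_add(iobref, iobuf) < 0) {
                iobuf_unref(iobuf);
                iobref_unref(iobref);
                return -ENOMEM;
        }

        /* iobref_add took its own ref, so the ref from iobuf_get2 can be
         * dropped now; the iobuf lives as long as the iobref does. */
        iobuf_unref(iobuf);

        *out = iobref;   /* caller releases it with iobref_unref() */
        return 0;
}
```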

###iobref_merge
```
int iobref_merge (struct iobref *to, struct iobref *from);
```
Adds all the iobufs of the 'from' iobref to the 'to' iobref. Merging does not
delete the 'from' iobref, and it results in an additional ref on every iobuf
added to the 'to' iobref. Hence iobref_unref should be performed on both 'from'
and 'to' (performing iobref_unref only on 'to' will not free the iobufs and may
result in a leak).
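
A hedged sketch of merging and then releasing both sides; combine_replies is a
hypothetical helper that owns 'from' and leaves releasing 'to' to its caller:
```
#include <glusterfs/iobuf.h>   /* assumed include path */

static int
combine_replies(struct iobref *to, struct iobref *from)
{
        int ret = iobref_merge(to, from);
        if (ret < 0)
                return ret;

        /* 'from' is no longer needed here; its iobufs stay alive through
         * the extra refs now held via 'to'. The caller still has to
         * iobref_unref(to) when it is done with the merged buffers. */
        iobref_unref(from);
        return 0;
}
```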

###iobref_clear
```
void iobref_clear (struct iobref *iobref);
```
Unreferences all the iobufs in the iobref, and also unrefs the iobref itself.

##Iobuf Leaks
If an iobuf_ref or iobuf_get does not have a corresponding iobuf_unref, the
iobuf is never freed, and repeated execution of such a code path can lead to
large memory leaks. The easiest way to identify whether a memory leak is caused
by iobufs is to take a statedump. If the statedump shows a lot of filled
arenas, it is a sure sign of a leak. Refer to doc/debugging/statedump.md for
more details.

If iobufs are leaking, the next step is to find where the iobuf_unref went
missing. There is no standard or easy way of debugging this; code reading and
logs are the only options. If the leak can be reproduced at will, then adding
logs that record the calling function (e.g. gf_log_callingfn) in
iobuf_ref/unref might help.  
TODO: an easier way to debug iobuf leaks.
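
One hedged, illustrative approach (not part of the iobuf API): during a
reproduction run, route the suspect xlator's get/unref calls through debug
wrapper macros that log each call site, then match the "get"/"unref" lines
offline. The macro names, the log domain and the use of a GCC statement
expression are all assumptions of this sketch:
```
#include <glusterfs/iobuf.h>     /* assumed include path */
#include <glusterfs/logging.h>   /* assumed: provides gf_log()/GF_LOG_INFO */

/* Hypothetical debug wrappers: every acquisition/release is logged with its
 * call site, so unmatched pairs can be grepped out of the log afterwards. */
#define DBG_IOBUF_GET(pool)                                                  \
        ({                                                                   \
                struct iobuf *_iob = iobuf_get(pool);                        \
                gf_log("iobuf-debug", GF_LOG_INFO, "get %p at %s:%d",        \
                       (void *)_iob, __FILE__, __LINE__);                    \
                _iob;                                                        \
        })

#define DBG_IOBUF_UNREF(iob)                                                 \
        do {                                                                 \
                gf_log("iobuf-debug", GF_LOG_INFO, "unref %p at %s:%d",      \
                       (void *)(iob), __FILE__, __LINE__);                   \
                iobuf_unref(iob);                                            \
        } while (0)
```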
+ goto blocking_lock;
+ }
+ }
- return _gf_false;
+ return 0;
+
+blocking_lock:
+ afr_dom_lock_release(frame);
+ AFR_ONALL(frame, afr_dom_lock_acquire_cbk, finodelk, AFR_LK_HEAL_DOM,
+ local->fd, F_SETLKW, &flock, NULL);
+ if (!afr_has_quorum(local->cont.lk.dom_locked_nodes, frame->this, NULL)) {
+ afr_dom_lock_release(frame);
+ return -afr_quorum_errno(priv);
+ }
+
+ return 0;
}
int
-__afr_inode_ctx_get (xlator_t *this, inode_t *inode, afr_inode_ctx_t **ctx)
+afr_dom_lock_release_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, dict_t *xdata)
{
- uint64_t ctx_int = 0;
- int ret = -1;
- afr_inode_ctx_t *tmp_ctx = NULL;
+ afr_local_t *local = frame->local;
+ afr_private_t *priv = this->private;
+ int i = (long)cookie;
- ret = __inode_ctx_get (inode, this, &ctx_int);
- if (ret) {
- tmp_ctx = GF_CALLOC (1, sizeof (afr_inode_ctx_t),
- gf_afr_mt_inode_ctx_t);
- if (!tmp_ctx)
- goto out;
-
- ctx_int = (long) tmp_ctx;
- ret = __inode_ctx_set (inode, this, &ctx_int);
- if (ret) {
- GF_FREE (tmp_ctx);
- goto out;
- }
- tmp_ctx->spb_choice = -1;
- tmp_ctx->read_subvol = 0;
- } else {
- tmp_ctx = (afr_inode_ctx_t *) ctx_int;
+ if (op_ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_LK_HEAL_DOM,
+ "%s: Failed to release %s on %s", local->loc.path,
+ AFR_LK_HEAL_DOM, priv->children[i]->name);
+ }
+ local->cont.lk.dom_locked_nodes[i] = 0;
+
+ syncbarrier_wake(&local->barrier);
+
+ return 0;
+}
+
+void
+afr_dom_lock_release(call_frame_t *frame)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ unsigned char *locked_on = NULL;
+ struct gf_flock flock = {
+ 0,
+ };
+
+ local = frame->local;
+ priv = frame->this->private;
+ locked_on = local->cont.lk.dom_locked_nodes;
+ if (AFR_COUNT(locked_on, priv->child_count) == 0)
+ return;
+ flock.l_type = F_UNLCK;
+
+ AFR_ONLIST(locked_on, frame, afr_dom_lock_release_cbk, finodelk,
+ AFR_LK_HEAL_DOM, local->fd, F_SETLK, &flock, NULL);
+
+ return;
+}
+
+static void
+afr_lk_heal_info_cleanup(afr_lk_heal_info_t *info)
+{
+ if (!info)
+ return;
+ if (info->xdata_req)
+ dict_unref(info->xdata_req);
+ if (info->fd)
+ fd_unref(info->fd);
+ GF_FREE(info->locked_nodes);
+ GF_FREE(info->child_up_event_gen);
+ GF_FREE(info->child_down_event_gen);
+ GF_FREE(info);
+}
+
+static int
+afr_add_lock_to_saved_locks(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = this->private;
+ afr_local_t *local = frame->local;
+ afr_lk_heal_info_t *info = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int ret = -ENOMEM;
+
+ info = GF_CALLOC(sizeof(*info), 1, gf_afr_mt_lk_heal_info_t);
+ if (!info) {
+ goto cleanup;
+ }
+ INIT_LIST_HEAD(&info->pos);
+ info->fd = fd_ref(local->fd);
+ info->cmd = local->cont.lk.cmd;
+ info->pid = frame->root->pid;
+ info->flock = local->cont.lk.user_flock;
+ info->xdata_req = dict_copy_with_ref(local->xdata_req, NULL);
+ if (!info->xdata_req) {
+ goto cleanup;
+ }
+ info->lk_owner = frame->root->lk_owner;
+ info->locked_nodes = GF_MALLOC(
+ sizeof(*info->locked_nodes) * priv->child_count, gf_afr_mt_char);
+ if (!info->locked_nodes) {
+ goto cleanup;
+ }
+ memcpy(info->locked_nodes, local->cont.lk.locked_nodes,
+ sizeof(*info->locked_nodes) * priv->child_count);
+ info->child_up_event_gen = GF_CALLOC(sizeof(*info->child_up_event_gen),
+ priv->child_count, gf_afr_mt_int32_t);
+ if (!info->child_up_event_gen) {
+ goto cleanup;
+ }
+ info->child_down_event_gen = GF_CALLOC(sizeof(*info->child_down_event_gen),
+ priv->child_count,
+ gf_afr_mt_int32_t);
+ if (!info->child_down_event_gen) {
+ goto cleanup;
+ }
+
+ LOCK(&local->fd->lock);
+ {
+ fd_ctx = __afr_fd_ctx_get(local->fd, this);
+ if (fd_ctx)
+ fd_ctx->lk_heal_info = info;
+ }
+ UNLOCK(&local->fd->lock);
+ if (!fd_ctx) {
+ goto cleanup;
+ }
+
+ LOCK(&priv->lock);
+ {
+ list_add_tail(&info->pos, &priv->saved_locks);
+ }
+ UNLOCK(&priv->lock);
+
+ return 0;
+cleanup:
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_LK_HEAL_DOM,
+ "%s: Failed to add lock to healq",
+ uuid_utoa(local->fd->inode->gfid));
+ if (info) {
+ afr_lk_heal_info_cleanup(info);
+ if (fd_ctx) {
+ LOCK(&local->fd->lock);
+ {
+ fd_ctx->lk_heal_info = NULL;
+ }
+ UNLOCK(&local->fd->lock);
}
+ }
+ return ret;
+}
- *ctx = tmp_ctx;
- ret = 0;
+static int
+afr_remove_lock_from_saved_locks(afr_local_t *local, xlator_t *this)
+{
+ afr_private_t *priv = this->private;
+ struct gf_flock flock = local->cont.lk.user_flock;
+ afr_lk_heal_info_t *info = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int ret = -EINVAL;
+
+ fd_ctx = afr_fd_ctx_get(local->fd, this);
+ if (!fd_ctx || !fd_ctx->lk_heal_info) {
+ goto out;
+ }
+
+ info = fd_ctx->lk_heal_info;
+ if ((info->flock.l_start != flock.l_start) ||
+ (info->flock.l_whence != flock.l_whence) ||
+ (info->flock.l_len != flock.l_len)) {
+ /*TODO: Compare lkowners too.*/
+ goto out;
+ }
+
+ LOCK(&priv->lock);
+ {
+ list_del(&fd_ctx->lk_heal_info->pos);
+ }
+ UNLOCK(&priv->lock);
+
+ afr_lk_heal_info_cleanup(info);
+ fd_ctx->lk_heal_info = NULL;
+ ret = 0;
out:
- return ret;
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_LK_HEAL_DOM,
+ "%s: Failed to remove lock from healq",
+ uuid_utoa(local->fd->inode->gfid));
+ return ret;
+}
+
+int
+afr_lock_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
+{
+ afr_local_t *local = frame->local;
+ int i = (long)cookie;
+
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+ if (op_ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_LK_HEAL_DOM,
+ "Failed to heal lock on child %d for %s", i,
+ uuid_utoa(local->fd->inode->gfid));
+ }
+ syncbarrier_wake(&local->barrier);
+ return 0;
+}
+
+int
+afr_getlk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
+{
+ afr_local_t *local = frame->local;
+ int i = (long)cookie;
+
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+ if (op_ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_LK_HEAL_DOM,
+ "Failed getlk for %s", uuid_utoa(local->fd->inode->gfid));
+ } else {
+ local->cont.lk.getlk_rsp[i] = *lock;
+ }
+
+ syncbarrier_wake(&local->barrier);
+ return 0;
+}
+
+static gf_boolean_t
+afr_does_lk_owner_match(call_frame_t *frame, afr_private_t *priv,
+ afr_lk_heal_info_t *info)
+{
+ int i = 0;
+ afr_local_t *local = frame->local;
+ struct gf_flock flock = {
+ 0,
+ };
+ gf_boolean_t ret = _gf_true;
+ char *wind_on = alloca0(priv->child_count);
+ unsigned char *success_replies = alloca0(priv->child_count);
+ local->cont.lk.getlk_rsp = GF_CALLOC(sizeof(*local->cont.lk.getlk_rsp),
+ priv->child_count, gf_afr_mt_gf_lock);
+
+ flock = info->flock;
+ for (i = 0; i < priv->child_count; i++) {
+ if (info->locked_nodes[i])
+ wind_on[i] = 1;
+ }
+
+ AFR_ONLIST(wind_on, frame, afr_getlk_cbk, lk, info->fd, F_GETLK, &flock,
+ info->xdata_req);
+
+ afr_fill_success_replies(local, priv, success_replies);
+ if (AFR_COUNT(success_replies, priv->child_count) == 0) {
+ ret = _gf_false;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid || local->replies[i].op_ret != 0)
+ continue;
+ if (local->cont.lk.getlk_rsp[i].l_type == F_UNLCK)
+ continue;
+ /*TODO: Do we really need to compare lkowner if F_UNLCK is true?*/
+ if (!is_same_lkowner(&local->cont.lk.getlk_rsp[i].l_owner,
+ &info->lk_owner)) {
+ ret = _gf_false;
+ break;
+ }
+ }
+out:
+ afr_local_replies_wipe(local, priv);
+ GF_FREE(local->cont.lk.getlk_rsp);
+ local->cont.lk.getlk_rsp = NULL;
+ return ret;
+}
+
+static void
+afr_mark_fd_bad(fd_t *fd, xlator_t *this)
+{
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ if (!fd)
+ return;
+ LOCK(&fd->lock);
+ {
+ fd_ctx = __afr_fd_ctx_get(fd, this);
+ if (fd_ctx) {
+ fd_ctx->is_fd_bad = _gf_true;
+ fd_ctx->lk_heal_info = NULL;
+ }
+ }
+ UNLOCK(&fd->lock);
+}
+
+static void
+afr_add_lock_to_lkhealq(afr_private_t *priv, afr_lk_heal_info_t *info)
+{
+ LOCK(&priv->lock);
+ {
+ list_del(&info->pos);
+ list_add_tail(&info->pos, &priv->lk_healq);
+ }
+ UNLOCK(&priv->lock);
}
+
+static void
+afr_lock_heal_do(call_frame_t *frame, afr_private_t *priv,
+ afr_lk_heal_info_t *info)
+{
+ int i = 0;
+ int op_errno = 0;
+ int32_t *current_event_gen = NULL;
+ afr_local_t *local = frame->local;
+ xlator_t *this = frame->this;
+ char *wind_on = alloca0(priv->child_count);
+ gf_boolean_t retry = _gf_true;
+
+ frame->root->pid = info->pid;
+ lk_owner_copy(&frame->root->lk_owner, &info->lk_owner);
+
+ op_errno = -afr_dom_lock_acquire(frame);
+ if ((op_errno != 0)) {
+ goto release;
+ }
+
+ if (!afr_does_lk_owner_match(frame, priv, info)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_LK_HEAL_DOM,
+ "Ignoring lock heal for %s since lk-onwers mismatch. "
+ "Lock possibly pre-empted by another client.",
+ uuid_utoa(info->fd->inode->gfid));
+ goto release;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (info->locked_nodes[i])
+ continue;
+ wind_on[i] = 1;
+ }
+
+ current_event_gen = alloca(priv->child_count * sizeof *current_event_gen);
+ memcpy(current_event_gen, info->child_up_event_gen,
+ priv->child_count * sizeof *current_event_gen);
+ AFR_ONLIST(wind_on, frame, afr_lock_heal_cbk, lk, info->fd, info->cmd,
+ &info->flock, info->xdata_req);
+
+ LOCK(&priv->lock);
+ {
+ for (i = 0; i < priv->child_count; i++) {
+ if (!wind_on[i])
+ continue;
+ if ((!local->replies[i].valid) || (local->replies[i].op_ret != 0)) {
+ continue;
+ }
+
+ if ((current_event_gen[i] == info->child_up_event_gen[i]) &&
+ (current_event_gen[i] > info->child_down_event_gen[i])) {
+ info->locked_nodes[i] = 1;
+ retry = _gf_false;
+ list_del_init(&info->pos);
+ list_add_tail(&info->pos, &priv->saved_locks);
+ } else {
+ /*We received subsequent child up/down events while heal was in
+ * progress; don't mark child as healed. Attempt again on the
+ * new child up*/
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_LK_HEAL_DOM,
+ "Event gen mismatch: skipped healing lock on child %d "
+ "for %s.",
+ i, uuid_utoa(info->fd->inode->gfid));
+ }
+ }
+ }
+ UNLOCK(&priv->lock);
+
+release:
+ afr_dom_lock_release(frame);
+ if (retry)
+ afr_add_lock_to_lkhealq(priv, info);
+ return;
+}
+
+static int
+afr_lock_heal_done(int ret, call_frame_t *frame, void *opaque)
+{
+ STACK_DESTROY(frame->root);
+ return 0;
+}
+
+static int
+afr_lock_heal(void *opaque)
+{
+ call_frame_t *frame = (call_frame_t *)opaque;
+ call_frame_t *iter_frame = NULL;
+ xlator_t *this = frame->this;
+ afr_private_t *priv = this->private;
+ afr_lk_heal_info_t *info = NULL;
+ afr_lk_heal_info_t *tmp = NULL;
+ struct list_head healq = {
+ 0,
+ };
+ int ret = 0;
+
+ iter_frame = afr_copy_frame(frame);
+ if (!iter_frame) {
+ return ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&healq);
+ LOCK(&priv->lock);
+ {
+ list_splice_init(&priv->lk_healq, &healq);
+ }
+ UNLOCK(&priv->lock);
+
+ list_for_each_entry_safe(info, tmp, &healq, pos)
+ {
+ GF_ASSERT((AFR_COUNT(info->locked_nodes, priv->child_count) <
+ priv->child_count));
+ ((afr_local_t *)(iter_frame->local))->fd = fd_ref(info->fd);
+ afr_lock_heal_do(iter_frame, priv, info);
+ AFR_STACK_RESET(iter_frame);
+ if (iter_frame->local == NULL) {
+ ret = ENOTCONN;
+ gf_msg(frame->this->name, GF_LOG_ERROR, ENOTCONN,
+ AFR_MSG_LK_HEAL_DOM,
+ "Aborting processing of lk_healq."
+ "Healing will be reattempted on next child up for locks "
+ "that are still in quorum.");
+ LOCK(&priv->lock);
+ {
+ list_add_tail(&healq, &priv->lk_healq);
+ }
+ UNLOCK(&priv->lock);
+ break;
+ }
+ }
+
+ AFR_STACK_DESTROY(iter_frame);
+ return ret;
+}
+
+static int
+__afr_lock_heal_synctask(xlator_t *this, afr_private_t *priv, int child)
+{
+ int ret = 0;
+ call_frame_t *frame = NULL;
+ afr_lk_heal_info_t *info = NULL;
+ afr_lk_heal_info_t *tmp = NULL;
+
+ if (priv->shd.iamshd)
+ return 0;
+
+ list_for_each_entry_safe(info, tmp, &priv->saved_locks, pos)
+ {
+ info->child_up_event_gen[child] = priv->event_generation;
+ list_del_init(&info->pos);
+ list_add_tail(&info->pos, &priv->lk_healq);
+ }
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame)
+ return -1;
+
+ ret = synctask_new(this->ctx->env, afr_lock_heal, afr_lock_heal_done, frame,
+ frame);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_LK_HEAL_DOM,
+ "Failed to launch lock heal synctask");
+
+ return ret;
+}
+
+static int
+__afr_mark_pending_lk_heal(xlator_t *this, afr_private_t *priv, int child)
+{
+ afr_lk_heal_info_t *info = NULL;
+ afr_lk_heal_info_t *tmp = NULL;
+
+ if (priv->shd.iamshd)
+ return 0;
+ list_for_each_entry_safe(info, tmp, &priv->saved_locks, pos)
+ {
+ info->child_down_event_gen[child] = priv->event_generation;
+ if (info->locked_nodes[child] == 1)
+ info->locked_nodes[child] = 0;
+ if (!afr_has_quorum(info->locked_nodes, this, NULL)) {
+ /* Since the lock was lost on quorum no. of nodes, we should
+ * not attempt to heal it anymore. Some other client could have
+ * acquired the lock, modified data and released it and this
+ * client wouldn't know about it if we heal it.*/
+ afr_mark_fd_bad(info->fd, this);
+ list_del(&info->pos);
+ afr_lk_heal_info_cleanup(info);
+ /* We're not winding an unlock on the node where the lock is still
+ * present because when fencing logic switches over to the new
+ * client (since we marked the fd bad), it should preempt any
+ * existing lock. */
+ }
+ }
+ return 0;
+}
+
+gf_boolean_t
+afr_is_consistent_io_possible(afr_local_t *local, afr_private_t *priv,
+ int32_t *op_errno)
+{
+ if (priv->consistent_io && local->call_count != priv->child_count) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, AFR_MSG_SUBVOLS_DOWN,
+ "All subvolumes are not up");
+ if (op_errno)
+ *op_errno = ENOTCONN;
+ return _gf_false;
+ }
+ return _gf_true;
+}
+
+gf_boolean_t
+afr_is_lock_mode_mandatory(dict_t *xdata)
+{
+ int ret = 0;
+ uint32_t lk_mode = GF_LK_ADVISORY;
+
+ ret = dict_get_uint32(xdata, GF_LOCK_MODE, &lk_mode);
+ if (!ret && lk_mode == GF_LK_MANDATORY)
+ return _gf_true;
+
+ return _gf_false;
+}
+
+call_frame_t *
+afr_copy_frame(call_frame_t *base)
+{
+ afr_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ int op_errno = 0;
+
+ frame = copy_frame(base);
+ if (!frame)
+ return NULL;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local) {
+ AFR_STACK_DESTROY(frame);
+ return NULL;
+ }
+
+ return frame;
+}
+
+/* Check if an entry or inode could be undergoing a transaction. */
+gf_boolean_t
+afr_is_possibly_under_txn(afr_transaction_type type, afr_local_t *local,
+ xlator_t *this)
+{
+ int i = 0;
+ int tmp = 0;
+ afr_private_t *priv = NULL;
+ GF_UNUSED char *key = NULL;
+ int keylen = 0;
+
+ priv = this->private;
+
+ if (type == AFR_ENTRY_TRANSACTION) {
+ key = GLUSTERFS_PARENT_ENTRYLK;
+ keylen = SLEN(GLUSTERFS_PARENT_ENTRYLK);
+ } else if (type == AFR_DATA_TRANSACTION) {
+ /*FIXME: Use GLUSTERFS_INODELK_DOM_COUNT etc. once
+ * pl_inodelk_xattr_fill supports separate keys for different
+ * domains.*/
+ key = GLUSTERFS_INODELK_COUNT;
+ keylen = SLEN(GLUSTERFS_INODELK_COUNT);
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].xdata)
+ continue;
+ if (dict_get_int32n(local->replies[i].xdata, key, keylen, &tmp) == 0)
+ if (tmp)
+ return _gf_true;
+ }
+
+ return _gf_false;
+}
+
+static void
+afr_inode_ctx_destroy(afr_inode_ctx_t *ctx)
+{
+ int i = 0;
+
+ if (!ctx)
+ return;
+
+ for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++) {
+ GF_FREE(ctx->pre_op_done[i]);
+ }
+
+ GF_FREE(ctx);
+}
+
+int
+__afr_inode_ctx_get(xlator_t *this, inode_t *inode, afr_inode_ctx_t **ctx)
+{
+ uint64_t ctx_int = 0;
+ int ret = -1;
+ int i = -1;
+ int num_locks = -1;
+ afr_inode_ctx_t *ictx = NULL;
+ afr_lock_t *lock = NULL;
+ afr_private_t *priv = this->private;
+
+ ret = __inode_ctx_get(inode, this, &ctx_int);
+ if (ret == 0) {
+ *ctx = (afr_inode_ctx_t *)(uintptr_t)ctx_int;
+ return 0;
+ }
+
+ ictx = GF_CALLOC(1, sizeof(afr_inode_ctx_t), gf_afr_mt_inode_ctx_t);
+ if (!ictx)
+ goto out;
+
+ for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++) {
+ ictx->pre_op_done[i] = GF_CALLOC(sizeof *ictx->pre_op_done[i],
+ priv->child_count, gf_afr_mt_int32_t);
+ if (!ictx->pre_op_done[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ num_locks = sizeof(ictx->lock) / sizeof(afr_lock_t);
+ for (i = 0; i < num_locks; i++) {
+ lock = &ictx->lock[i];
+ INIT_LIST_HEAD(&lock->post_op);
+ INIT_LIST_HEAD(&lock->frozen);
+ INIT_LIST_HEAD(&lock->waiting);
+ INIT_LIST_HEAD(&lock->owners);
+ }
+
+ ctx_int = (uint64_t)(uintptr_t)ictx;
+ ret = __inode_ctx_set(inode, this, &ctx_int);
+ if (ret) {
+ goto out;
+ }
+
+ ictx->spb_choice = -1;
+ ictx->read_subvol = 0;
+ ictx->write_subvol = 0;
+ ictx->lock_count = 0;
+ ret = 0;
+ *ctx = ictx;
+out:
+ if (ret) {
+ afr_inode_ctx_destroy(ictx);
+ }
+ return ret;
+}
+
/*
* INODE CTX 64-bit VALUE FORMAT FOR SMALL (<= 16) SUBVOL COUNTS:
*
@@ -155,1659 +863,2258 @@ out:
*/
int
-__afr_inode_read_subvol_get_small (inode_t *inode, xlator_t *this,
- unsigned char *data, unsigned char *metadata,
- int *event_p)
-{
- afr_private_t *priv = NULL;
- int ret = -1;
- uint16_t datamap = 0;
- uint16_t metadatamap = 0;
- uint32_t event = 0;
- uint64_t val = 0;
- int i = 0;
- afr_inode_ctx_t *ctx = NULL;
+__afr_set_in_flight_sb_status(xlator_t *this, afr_local_t *local,
+ inode_t *inode)
+{
+ int i = 0;
+ int txn_type = 0;
+ int count = 0;
+ int index = -1;
+ uint16_t datamap_old = 0;
+ uint16_t metadatamap_old = 0;
+ uint16_t datamap = 0;
+ uint16_t metadatamap = 0;
+ uint16_t tmp_map = 0;
+ uint16_t mask = 0;
+ uint32_t event = 0;
+ uint64_t val = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ txn_type = local->transaction.type;
+
+ if (txn_type == AFR_DATA_TRANSACTION)
+ val = local->inode_ctx->write_subvol;
+ else
+ val = local->inode_ctx->read_subvol;
+
+ metadatamap_old = metadatamap = (val & 0x000000000000ffff);
+ datamap_old = datamap = (val & 0x00000000ffff0000) >> 16;
+ event = (val & 0xffffffff00000000) >> 32;
+
+ if (txn_type == AFR_DATA_TRANSACTION)
+ tmp_map = datamap;
+ else if (txn_type == AFR_METADATA_TRANSACTION)
+ tmp_map = metadatamap;
+
+ count = gf_bits_count(tmp_map);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.failed_subvols[i])
+ continue;
+
+ mask = 1 << i;
+ if (txn_type == AFR_METADATA_TRANSACTION)
+ metadatamap &= ~mask;
+ else if (txn_type == AFR_DATA_TRANSACTION)
+ datamap &= ~mask;
+ }
+
+ switch (txn_type) {
+ case AFR_METADATA_TRANSACTION:
+ if ((metadatamap_old != 0) && (metadatamap == 0) && (count == 1)) {
+ index = gf_bits_index(tmp_map);
+ local->transaction.in_flight_sb_errno = local->replies[index]
+ .op_errno;
+ local->transaction.in_flight_sb = _gf_true;
+ metadatamap |= (1 << index);
+ }
+ if (metadatamap_old != metadatamap) {
+ __afr_inode_need_refresh_set(inode, this);
+ }
+ break;
+
+ case AFR_DATA_TRANSACTION:
+ if ((datamap_old != 0) && (datamap == 0) && (count == 1)) {
+ index = gf_bits_index(tmp_map);
+ local->transaction.in_flight_sb_errno = local->replies[index]
+ .op_errno;
+ local->transaction.in_flight_sb = _gf_true;
+ datamap |= (1 << index);
+ }
+ if (datamap_old != datamap)
+ __afr_inode_need_refresh_set(inode, this);
+ break;
- priv = this->private;
+ default:
+ break;
+ }
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret < 0)
- return ret;
+ val = ((uint64_t)metadatamap) | (((uint64_t)datamap) << 16) |
+ (((uint64_t)event) << 32);
- val = ctx->read_subvol;
+ if (txn_type == AFR_DATA_TRANSACTION)
+ local->inode_ctx->write_subvol = val;
+ local->inode_ctx->read_subvol = val;
- metadatamap = (val & 0x000000000000ffff);
- datamap = (val & 0x00000000ffff0000) >> 16;
- event = (val & 0xffffffff00000000) >> 32;
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (metadata)
- metadata[i] = (metadatamap >> i) & 1;
- if (data)
- data[i] = (datamap >> i) & 1;
- }
+gf_boolean_t
+afr_is_symmetric_error(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int op_errno = 0;
+ int i_errno = 0;
+ gf_boolean_t matching_errors = _gf_true;
+ int i = 0;
- if (event_p)
- *event_p = event;
- return ret;
-}
+ priv = this->private;
+ local = frame->local;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret != -1) {
+ /* Operation succeeded on at least one subvol,
+ so it is not a failed-everywhere situation.
+ */
+ matching_errors = _gf_false;
+ break;
+ }
+ i_errno = local->replies[i].op_errno;
+
+ if (i_errno == ENOTCONN) {
+ /* ENOTCONN is not a symmetric error. We do not
+ know if the operation was performed on the
+ backend or not.
+ */
+ matching_errors = _gf_false;
+ break;
+ }
+ if (!op_errno) {
+ op_errno = i_errno;
+ } else if (op_errno != i_errno) {
+ /* Mismatching op_errno's */
+ matching_errors = _gf_false;
+ break;
+ }
+ }
+
+ return matching_errors;
+}
int
-__afr_inode_read_subvol_set_small (inode_t *inode, xlator_t *this,
- unsigned char *data, unsigned char *metadata,
- int event)
+afr_set_in_flight_sb_status(xlator_t *this, call_frame_t *frame, inode_t *inode)
{
- afr_private_t *priv = NULL;
- uint16_t datamap = 0;
- uint16_t metadatamap = 0;
- uint64_t val = 0;
- int i = 0;
- int ret = -1;
- afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
+ local = frame->local;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret)
- goto out;
+ /* If this transaction saw no failures, then exit. */
+ if (AFR_COUNT(local->transaction.failed_subvols, priv->child_count) == 0)
+ return 0;
- for (i = 0; i < priv->child_count; i++) {
- if (data[i])
- datamap |= (1 << i);
- if (metadata[i])
- metadatamap |= (1 << i);
- }
+ if (afr_is_symmetric_error(frame, this))
+ return 0;
- val = ((uint64_t) metadatamap) |
- (((uint64_t) datamap) << 16) |
- (((uint64_t) event) << 32);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_set_in_flight_sb_status(this, local, inode);
+ }
+ UNLOCK(&inode->lock);
- ctx->read_subvol = val;
+ return ret;
+}
- ret = 0;
-out:
+int
+__afr_inode_read_subvol_get_small(inode_t *inode, xlator_t *this,
+ unsigned char *data, unsigned char *metadata,
+ int *event_p)
+{
+ afr_private_t *priv = NULL;
+ int ret = -1;
+ uint16_t datamap = 0;
+ uint16_t metadatamap = 0;
+ uint32_t event = 0;
+ uint64_t val = 0;
+ int i = 0;
+ afr_inode_ctx_t *ctx = NULL;
+
+ priv = this->private;
+
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret < 0)
return ret;
+
+ val = ctx->read_subvol;
+
+ metadatamap = (val & 0x000000000000ffff);
+ datamap = (val & 0x00000000ffff0000) >> 16;
+ event = (val & 0xffffffff00000000) >> 32;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (metadata)
+ metadata[i] = (metadatamap >> i) & 1;
+ if (data)
+ data[i] = (datamap >> i) & 1;
+ }
+
+ if (event_p)
+ *event_p = event;
+ return ret;
}
int
-__afr_inode_read_subvol_reset_small (inode_t *inode, xlator_t *this)
+__afr_inode_read_subvol_set_small(inode_t *inode, xlator_t *this,
+ unsigned char *data, unsigned char *metadata,
+ int event)
{
- int ret = -1;
- uint16_t datamap = 0;
- uint16_t metadatamap = 0;
- uint32_t event = 0;
- uint64_t val = 0;
- afr_inode_ctx_t *ctx = NULL;
+ afr_private_t *priv = NULL;
+ uint16_t datamap = 0;
+ uint16_t metadatamap = 0;
+ uint64_t val = 0;
+ int i = 0;
+ int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret)
- return ret;
+ priv = this->private;
- val = ctx->read_subvol;
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret)
+ goto out;
- metadatamap = (val & 0x000000000000ffff) >> 0;
- datamap = (val & 0x00000000ffff0000) >> 16;
- event = 0;
+ for (i = 0; i < priv->child_count; i++) {
+ if (data[i])
+ datamap |= (1 << i);
+ if (metadata[i])
+ metadatamap |= (1 << i);
+ }
- val = ((uint64_t) metadatamap) |
- (((uint64_t) datamap) << 16) |
- (((uint64_t) event) << 32);
+ val = ((uint64_t)metadatamap) | (((uint64_t)datamap) << 16) |
+ (((uint64_t)event) << 32);
- ctx->read_subvol = val;
+ ctx->read_subvol = val;
- return ret;
+ ret = 0;
+out:
+ return ret;
}
-
int
-__afr_inode_read_subvol_get (inode_t *inode, xlator_t *this,
- unsigned char *data, unsigned char *metadata,
- int *event_p)
+__afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int *event_p)
{
- afr_private_t *priv = NULL;
- int ret = -1;
+ afr_private_t *priv = NULL;
+ int ret = -1;
- priv = this->private;
+ priv = this->private;
- if (priv->child_count <= 16)
- ret = __afr_inode_read_subvol_get_small (inode, this, data,
- metadata, event_p);
- else
- /* TBD: allocate structure with array and read from it */
- ret = -1;
+ if (priv->child_count <= 16)
+ ret = __afr_inode_read_subvol_get_small(inode, this, data, metadata,
+ event_p);
+ else
+ /* TBD: allocate structure with array and read from it */
+ ret = -1;
- return ret;
+ return ret;
}
int
-__afr_inode_split_brain_choice_get (inode_t *inode, xlator_t *this,
- int *spb_choice)
+__afr_inode_split_brain_choice_get(inode_t *inode, xlator_t *this,
+ int *spb_choice)
{
- afr_inode_ctx_t *ctx = NULL;
- int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret < 0)
- return ret;
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret < 0)
+ return ret;
- *spb_choice = ctx->spb_choice;
- return 0;
+ *spb_choice = ctx->spb_choice;
+ return 0;
}
int
-__afr_inode_read_subvol_set (inode_t *inode, xlator_t *this, unsigned char *data,
- unsigned char *metadata, int event)
+__afr_inode_read_subvol_set(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int event)
{
- afr_private_t *priv = NULL;
- int ret = -1;
+ afr_private_t *priv = NULL;
+ int ret = -1;
- priv = this->private;
+ priv = this->private;
- if (priv->child_count <= 16)
- ret = __afr_inode_read_subvol_set_small (inode, this, data,
- metadata, event);
- else
- ret = -1;
+ if (priv->child_count <= 16)
+ ret = __afr_inode_read_subvol_set_small(inode, this, data, metadata,
+ event);
+ else
+ ret = -1;
- return ret;
+ return ret;
}
int
-__afr_inode_split_brain_choice_set (inode_t *inode, xlator_t *this,
- int spb_choice)
+__afr_inode_split_brain_choice_set(inode_t *inode, xlator_t *this,
+ int spb_choice)
{
- afr_inode_ctx_t *ctx = NULL;
- int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret)
- goto out;
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret)
+ goto out;
- ctx->spb_choice = spb_choice;
+ ctx->spb_choice = spb_choice;
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
int
-__afr_inode_read_subvol_reset (inode_t *inode, xlator_t *this)
+afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int *event_p)
{
- afr_private_t *priv = NULL;
- int ret = -1;
-
- priv = this->private;
+ int ret = -1;
- if (priv->child_count <= 16)
- ret = __afr_inode_read_subvol_reset_small (inode, this);
- else
- ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- return ret;
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_read_subvol_get(inode, this, data, metadata, event_p);
+ }
+ UNLOCK(&inode->lock);
+out:
+ return ret;
}
-
int
-afr_inode_read_subvol_get (inode_t *inode, xlator_t *this, unsigned char *data,
- unsigned char *metadata, int *event_p)
+afr_inode_get_readable(call_frame_t *frame, inode_t *inode, xlator_t *this,
+ unsigned char *readable, int *event_p, int type)
{
- int ret = -1;
+ afr_private_t *priv = this->private;
+ afr_local_t *local = frame->local;
+ unsigned char *data = alloca0(priv->child_count);
+ unsigned char *metadata = alloca0(priv->child_count);
+ int data_count = 0;
+ int metadata_count = 0;
+ int event_generation = 0;
+ int ret = 0;
+
+ ret = afr_inode_read_subvol_get(inode, this, data, metadata,
+ &event_generation);
+ if (ret == -1)
+ return -EIO;
+
+ data_count = AFR_COUNT(data, priv->child_count);
+ metadata_count = AFR_COUNT(metadata, priv->child_count);
+
+ if (inode->ia_type == IA_IFDIR) {
+ /* For directories, allow even if it is in data split-brain. */
+ if (type == AFR_METADATA_TRANSACTION || local->op == GF_FOP_STAT ||
+ local->op == GF_FOP_FSTAT) {
+ if (!metadata_count)
+ return -EIO;
+ }
+ } else {
+ /* For files, abort in case of data/metadata split-brain. */
+ if (!data_count || !metadata_count) {
+ return -EIO;
+ }
+ }
+
+ if (type == AFR_METADATA_TRANSACTION && readable)
+ memcpy(readable, metadata, priv->child_count * sizeof *metadata);
+ if (type == AFR_DATA_TRANSACTION && readable) {
+ if (!data_count)
+ memcpy(readable, local->child_up,
+ priv->child_count * sizeof *readable);
+ else
+ memcpy(readable, data, priv->child_count * sizeof *data);
+ }
+ if (event_p)
+ *event_p = event_generation;
+ return 0;
+}
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+static int
+afr_inode_split_brain_choice_get(inode_t *inode, xlator_t *this,
+ int *spb_choice)
+{
+ int ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_read_subvol_get (inode, this, data,
- metadata, event_p);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_split_brain_choice_get(inode, this, spb_choice);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
+/*
+ * frame is used to get the favourite-child policy. Since
+ * afr_inode_split_brain_choice_get can be called from afr_open, it is
+ * possible to have a frame without local->replies. In that case, frame is
+ * passed as NULL, so this function handles the frame == NULL case.
+ */
int
-afr_inode_get_readable (call_frame_t *frame, inode_t *inode, xlator_t *this,
- unsigned char *readable, int *event_p, int type)
+afr_split_brain_read_subvol_get(inode_t *inode, xlator_t *this,
+ call_frame_t *frame, int *spb_subvol)
{
+ int ret = -1;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- afr_private_t *priv = this->private;
- afr_local_t *local = frame->local;
- unsigned char *data = alloca0 (priv->child_count);
- unsigned char *metadata = alloca0 (priv->child_count);
- int data_count = 0;
- int metadata_count = 0;
- int event_generation = 0;
- int ret = 0;
+ GF_VALIDATE_OR_GOTO("afr", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, spb_subvol, out);
- /* We don't care about split-brains for entry transactions. */
- if (type == AFR_ENTRY_TRANSACTION || type == AFR_ENTRY_RENAME_TRANSACTION)
- return 0;
-
- ret = afr_inode_read_subvol_get (inode, this, data, metadata,
- &event_generation);
- if (ret == -1)
- return -EIO;
+ priv = this->private;
- data_count = AFR_COUNT (data, priv->child_count);
- metadata_count = AFR_COUNT (metadata, priv->child_count);
+ ret = afr_inode_split_brain_choice_get(inode, this, spb_subvol);
+ if (*spb_subvol < 0 && priv->fav_child_policy && frame && frame->local) {
+ local = frame->local;
+ *spb_subvol = afr_sh_get_fav_by_policy(this, local->replies, inode,
+ NULL);
+ if (*spb_subvol >= 0) {
+ ret = 0;
+ }
+ }
- if (inode->ia_type == IA_IFDIR) {
- /* For directories, allow even if it is in data split-brain. */
- if (type == AFR_METADATA_TRANSACTION ||
- local->op == GF_FOP_STAT || local->op == GF_FOP_FSTAT) {
- if (!metadata_count)
- return -EIO;
- }
- } else {
- /* For files, abort in case of data/metadata split-brain. */
- if (!data_count || !metadata_count)
- return -EIO;
- }
-
- if (type == AFR_METADATA_TRANSACTION && readable)
- memcpy (readable, metadata, priv->child_count * sizeof *metadata);
- if (type == AFR_DATA_TRANSACTION && readable) {
- if (!data_count)
- memcpy (readable, local->child_up,
- priv->child_count * sizeof *readable);
- else
- memcpy (readable, data, priv->child_count * sizeof *data);
- }
- if (event_p)
- *event_p = event_generation;
- return 0;
+out:
+ return ret;
}
-
int
-afr_inode_split_brain_choice_get (inode_t *inode, xlator_t *this,
- int *spb_choice)
+afr_inode_read_subvol_set(inode_t *inode, xlator_t *this, unsigned char *data,
+ unsigned char *metadata, int event)
{
- int ret = -1;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_split_brain_choice_get (inode, this,
- spb_choice);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_read_subvol_set(inode, this, data, metadata, event);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
-
int
-afr_inode_read_subvol_set (inode_t *inode, xlator_t *this, unsigned char *data,
- unsigned char *metadata, int event)
+afr_inode_split_brain_choice_set(inode_t *inode, xlator_t *this, int spb_choice)
{
- int ret = -1;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_read_subvol_set (inode, this, data, metadata,
- event);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_split_brain_choice_set(inode, this, spb_choice);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
-
-int
-afr_inode_split_brain_choice_set (inode_t *inode, xlator_t *this,
- int spb_choice)
+/* The caller of this should perform afr_inode_refresh if this function
+ * returns _gf_true.
+ */
+gf_boolean_t
+afr_is_inode_refresh_reqd(inode_t *inode, xlator_t *this, int event_gen1,
+ int event_gen2)
{
- int ret = -1;
+ gf_boolean_t need_refresh = _gf_false;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_split_brain_choice_set (inode, this,
- spb_choice);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret)
+ goto unlock;
+
+ need_refresh = ctx->need_refresh;
+ /* We expect the caller to follow this call with an inode refresh,
+ * hence reset need_refresh to false here. */
+ ctx->need_refresh = _gf_false;
+ }
+unlock:
+ UNLOCK(&inode->lock);
+
+ if (event_gen1 != event_gen2)
+ need_refresh = _gf_true;
out:
- return ret;
+ return need_refresh;
}
+int
+__afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
+{
+ int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret == 0) {
+ ctx->need_refresh = _gf_true;
+ }
+
+ return ret;
+}
int
-afr_inode_read_subvol_reset (inode_t *inode, xlator_t *this)
+afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
{
- int ret = -1;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
- LOCK(&inode->lock);
- {
- ret = __afr_inode_read_subvol_reset (inode, this);
- }
- UNLOCK(&inode->lock);
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_need_refresh_set(inode, this);
+ }
+ UNLOCK(&inode->lock);
out:
- return ret;
+ return ret;
}
int
-afr_spb_choice_timeout_cancel (xlator_t *this, inode_t *inode)
+afr_spb_choice_timeout_cancel(xlator_t *this, inode_t *inode)
{
- afr_inode_ctx_t *ctx = NULL;
- int ret = -1;
+ afr_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- if (!inode)
- return ret;
+ if (!inode)
+ return ret;
- LOCK(&inode->lock);
- {
- __afr_inode_ctx_get (this, inode, &ctx);
- if (!ctx) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
- "Failed to cancel split-brain choice timer.");
- goto out;
- }
- ctx->spb_choice = -1;
- if (ctx->timer) {
- gf_timer_call_cancel (this->ctx, ctx->timer);
- ctx->timer = NULL;
- }
- ret = 0;
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret < 0 || !ctx) {
+ UNLOCK(&inode->lock);
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
+ "Failed to cancel split-brain choice timer.");
+ goto out;
+ }
+ ctx->spb_choice = -1;
+ if (ctx->timer) {
+ gf_timer_call_cancel(this->ctx, ctx->timer);
+ ctx->timer = NULL;
}
+ ret = 0;
+ }
+ UNLOCK(&inode->lock);
out:
- UNLOCK(&inode->lock);
- return ret;
+ return ret;
}
void
-afr_set_split_brain_choice_cbk (void *data)
+afr_set_split_brain_choice_cbk(void *data)
{
- inode_t *inode = data;
- xlator_t *this = THIS;
+ inode_t *inode = data;
+ xlator_t *this = THIS;
- afr_spb_choice_timeout_cancel (this, inode);
- inode_unref (inode);
- return;
+ afr_spb_choice_timeout_cancel(this, inode);
+ inode_invalidate(inode);
+ inode_unref(inode);
+ return;
}
-
int
-afr_set_split_brain_choice (int ret, call_frame_t *frame, void *opaque)
-{
- int op_errno = ENOMEM;
- afr_private_t *priv = NULL;
- afr_inode_ctx_t *ctx = NULL;
- inode_t *inode = NULL;
- loc_t *loc = NULL;
- xlator_t *this = NULL;
- afr_spbc_timeout_t *data = opaque;
- struct timespec delta = {0, };
-
- if (ret)
- goto out;
-
- frame = data->frame;
- loc = data->loc;
- this = frame->this;
- priv = this->private;
-
- delta.tv_sec = priv->spb_choice_timeout;
- delta.tv_nsec = 0;
-
- inode = loc->inode;
- if (!inode)
- goto out;
-
- if (!(data->d_spb || data->m_spb)) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR, "Cannot set "
- "replica.split-brain-choice on %s. File is"
- " not in data/metadata split-brain.",
- uuid_utoa (loc->gfid));
- ret = -1;
- op_errno = EINVAL;
- goto out;
+afr_set_split_brain_choice(int ret, call_frame_t *frame, void *opaque)
+{
+ int op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ afr_inode_ctx_t *ctx = NULL;
+ inode_t *inode = NULL;
+ loc_t *loc = NULL;
+ xlator_t *this = NULL;
+ afr_spbc_timeout_t *data = opaque;
+ struct timespec delta = {
+ 0,
+ };
+ gf_boolean_t timer_set = _gf_false;
+ gf_boolean_t timer_cancelled = _gf_false;
+ gf_boolean_t timer_reset = _gf_false;
+ int old_spb_choice = -1;
+
+ frame = data->frame;
+ loc = data->loc;
+ this = frame->this;
+ priv = this->private;
+
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+
+ delta.tv_sec = priv->spb_choice_timeout;
+ delta.tv_nsec = 0;
+
+ if (!loc->inode) {
+ ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ if (!(data->d_spb || data->m_spb)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
+ "Cannot set "
+ "replica.split-brain-choice on %s. File is"
+ " not in data/metadata split-brain.",
+ uuid_utoa(loc->gfid));
+ ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ /*
+ * we're ref'ing the inode before LOCK like it is done elsewhere in the
+ * code. If we ref after LOCK, coverity complains of possible deadlocks.
+ */
+ inode = inode_ref(loc->inode);
+
+ LOCK(&inode->lock);
+ {
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret) {
+ UNLOCK(&inode->lock);
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
+ "Failed to get inode_ctx for %s", loc->name);
+ goto post_unlock;
}
- LOCK(&inode->lock);
- {
- ret = __afr_inode_ctx_get (this, inode, &ctx);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
- "Failed to get inode_ctx for %s", loc->name);
- goto unlock;
- }
-
- ctx->spb_choice = data->spb_child_index;
+ old_spb_choice = ctx->spb_choice;
+ ctx->spb_choice = data->spb_child_index;
- /* Possible changes in spb-choice :
- * -1 to valid : ref and inject timer
- *
- * valid to valid : cancel timer and inject new one
- *
- * valid to -1 : cancel timer and unref
- *
- * -1 to -1 : do not do anything
- */
+ /* Possible changes in spb-choice :
+ * valid to -1 : cancel timer and unref
+ * valid to valid : cancel timer and inject new one
+ * -1 to -1 : unref and do not do anything
+ * -1 to valid : inject timer
+ */
- /* ctx->timer is NULL iff previous value of
- * ctx->spb_choice is -1
- */
- if (ctx->timer) {
- if (ctx->spb_choice == -1) {
- gf_timer_call_cancel (this->ctx, ctx->timer);
- ctx->timer = NULL;
- inode_unref (inode);
- goto unlock;
- }
- goto reset_timer;
- } else {
- if (ctx->spb_choice == -1)
- goto unlock;
+ /* ctx->timer is NULL iff previous value of
+ * ctx->spb_choice is -1
+ */
+ if (ctx->timer) {
+ if (ctx->spb_choice == -1) {
+ if (!gf_timer_call_cancel(this->ctx, ctx->timer)) {
+ ctx->timer = NULL;
+ timer_cancelled = _gf_true;
}
-
- inode = inode_ref (loc->inode);
- goto set_timer;
-
-reset_timer:
- gf_timer_call_cancel (this->ctx, ctx->timer);
- ctx->timer = NULL;
-
-set_timer:
- ctx->timer = gf_timer_call_after (this->ctx, delta,
- afr_set_split_brain_choice_cbk,
- inode);
+ /* If timer cancel failed here it means that the
+ * previous cbk will be executed which will set
+ * spb_choice to -1. So we can consider the
+ * 'valid to -1' case to be a success
+ * (i.e. ret = 0) and goto unlock.
+ */
+ goto unlock;
+ }
+ goto reset_timer;
+ } else {
+ if (ctx->spb_choice == -1)
+ goto unlock;
+ goto set_timer;
}
+
+ reset_timer:
+ ret = gf_timer_call_cancel(this->ctx, ctx->timer);
+ if (ret != 0) {
+ /* We need to bail out now instead of launching a new
+ * timer. Otherwise the cbk of the previous timer event
+ * will cancel the new ctx->timer.
+ */
+ ctx->spb_choice = old_spb_choice;
+ ret = -1;
+ op_errno = EAGAIN;
+ goto unlock;
+ }
+ ctx->timer = NULL;
+ timer_reset = _gf_true;
+
+ set_timer:
+ ctx->timer = gf_timer_call_after(this->ctx, delta,
+ afr_set_split_brain_choice_cbk, inode);
+ if (!ctx->timer) {
+ ctx->spb_choice = old_spb_choice;
+ ret = -1;
+ op_errno = ENOMEM;
+ }
+ if (!timer_reset && ctx->timer)
+ timer_set = _gf_true;
+ if (timer_reset && !ctx->timer)
+ timer_cancelled = _gf_true;
+ }
unlock:
- UNLOCK(&inode->lock);
- inode_invalidate (inode);
+ UNLOCK(&inode->lock);
+post_unlock:
+ if (!timer_set)
+ inode_unref(inode);
+ if (timer_cancelled)
+ inode_unref(inode);
+ /*
+ * We need to invalidate the inode to prevent the kernel from serving
+ * reads from an older cached value despite a change in spb_choice to
+ * a new value.
+ */
+ inode_invalidate(inode);
out:
- if (data)
- GF_FREE (data);
- AFR_STACK_UNWIND (setxattr, frame, ret, op_errno, NULL);
- return 0;
+ GF_FREE(data);
+ AFR_STACK_UNWIND(setxattr, frame, ret, op_errno, NULL);
+ return 0;
}
int
-afr_accused_fill (xlator_t *this, dict_t *xdata, unsigned char *accused,
- afr_transaction_type type)
+afr_accused_fill(xlator_t *this, dict_t *xdata, unsigned char *accused,
+ afr_transaction_type type)
{
- afr_private_t *priv = NULL;
- int i = 0;
- int idx = afr_index_for_transaction_type (type);
- void *pending_raw = NULL;
- int pending[3];
- int ret = 0;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int idx = afr_index_for_transaction_type(type);
+ void *pending_raw = NULL;
+ int pending[3];
+ int ret = 0;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- ret = dict_get_ptr (xdata, priv->pending_key[i],
- &pending_raw);
- if (ret) /* no pending flags */
- continue;
- memcpy (pending, pending_raw, sizeof(pending));
+ for (i = 0; i < priv->child_count; i++) {
+ ret = dict_get_ptr(xdata, priv->pending_key[i], &pending_raw);
+ if (ret) /* no pending flags */
+ continue;
+ memcpy(pending, pending_raw, sizeof(pending));
- if (ntoh32 (pending[idx]))
- accused[i] = 1;
- }
+ if (ntoh32(pending[idx]))
+ accused[i] = 1;
+ }
- return 0;
+ return 0;
}
int
-afr_accuse_smallfiles (xlator_t *this, struct afr_reply *replies,
- unsigned char *data_accused)
+afr_accuse_smallfiles(xlator_t *this, struct afr_reply *replies,
+ unsigned char *data_accused)
{
- int i = 0;
- afr_private_t *priv = NULL;
- uint64_t maxsize = 0;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ uint64_t maxsize = 0;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (replies[i].valid && replies[i].xdata &&
- dict_get (replies[i].xdata, GLUSTERFS_BAD_INODE))
- continue;
- if (data_accused[i])
- continue;
- if (replies[i].poststat.ia_size > maxsize)
- maxsize = replies[i].poststat.ia_size;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid && replies[i].xdata &&
+ dict_get_sizen(replies[i].xdata, GLUSTERFS_BAD_INODE))
+ continue;
+ if (data_accused[i])
+ continue;
+ if (replies[i].poststat.ia_size > maxsize)
+ maxsize = replies[i].poststat.ia_size;
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (data_accused[i])
- continue;
- if (AFR_IS_ARBITER_BRICK(priv, i))
- continue;
- if (replies[i].poststat.ia_size < maxsize)
- data_accused[i] = 1;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (data_accused[i])
+ continue;
+ if (AFR_IS_ARBITER_BRICK(priv, i))
+ continue;
+ if (replies[i].poststat.ia_size < maxsize)
+ data_accused[i] = 1;
+ }
- return 0;
+ return 0;
}
int
-afr_replies_interpret (call_frame_t *frame, xlator_t *this, inode_t *inode,
- gf_boolean_t *start_heal)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- int event_generation = 0;
- int i = 0;
- unsigned char *data_accused = NULL;
- unsigned char *metadata_accused = NULL;
- unsigned char *data_readable = NULL;
- unsigned char *metadata_readable = NULL;
- int ret = 0;
-
- local = frame->local;
- priv = this->private;
- replies = local->replies;
- event_generation = local->event_generation;
-
- data_accused = alloca0 (priv->child_count);
- data_readable = alloca0 (priv->child_count);
- metadata_accused = alloca0 (priv->child_count);
- metadata_readable = alloca0 (priv->child_count);
-
- for (i = 0; i < priv->child_count; i++) {
- data_readable[i] = 1;
- metadata_readable[i] = 1;
- }
- if (AFR_IS_ARBITER_BRICK (priv, ARBITER_BRICK_INDEX)) {
- data_readable[ARBITER_BRICK_INDEX] = 0;
- metadata_readable[ARBITER_BRICK_INDEX] = 0;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid) {
- data_readable[i] = 0;
- metadata_readable[i] = 0;
- continue;
- }
-
- if (replies[i].op_ret == -1) {
- data_readable[i] = 0;
- metadata_readable[i] = 0;
- continue;
- }
-
- if (replies[i].xdata &&
- dict_get (replies[i].xdata, GLUSTERFS_BAD_INODE)) {
- data_readable[i] = 0;
- metadata_readable[i] = 0;
- continue;
- }
-
- afr_accused_fill (this, replies[i].xdata, data_accused,
- (replies[i].poststat.ia_type == IA_IFDIR) ?
- AFR_ENTRY_TRANSACTION : AFR_DATA_TRANSACTION);
-
- afr_accused_fill (this, replies[i].xdata,
- metadata_accused, AFR_METADATA_TRANSACTION);
-
- }
-
- if ((inode->ia_type != IA_IFDIR) &&
- /* We want to accuse small files only when we know for sure that
- * there is no IO happening. Otherwise, the ia_sizes obtained in
- * post-refresh replies may mismatch due to a race between inode-
- * refresh and ongoing writes, causing spurious heal launches*/
- !afr_is_possibly_under_txn (AFR_DATA_TRANSACTION, local, this))
- afr_accuse_smallfiles (this, replies, data_accused);
-
- for (i = 0; i < priv->child_count; i++) {
- if (data_accused[i]) {
- data_readable[i] = 0;
- ret = 1;
- }
- if (metadata_accused[i]) {
- metadata_readable[i] = 0;
- ret = 1;
- }
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (start_heal && priv->child_up[i] &&
- (!data_readable[i] || !metadata_readable[i])) {
- *start_heal = _gf_true;
- break;
- }
- }
- afr_inode_read_subvol_set (inode, this, data_readable,
- metadata_readable, event_generation);
- return ret;
+afr_readables_fill(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ unsigned char *data_accused, unsigned char *metadata_accused,
+ unsigned char *data_readable,
+ unsigned char *metadata_readable, struct afr_reply *replies)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xdata = NULL;
+ int i = 0;
+ int ret = 0;
+ ia_type_t ia_type = IA_INVAL;
+
+ local = frame->local;
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ data_readable[i] = 1;
+ metadata_readable[i] = 1;
+ }
+ if (AFR_IS_ARBITER_BRICK(priv, ARBITER_BRICK_INDEX)) {
+ data_readable[ARBITER_BRICK_INDEX] = 0;
+ metadata_readable[ARBITER_BRICK_INDEX] = 0;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies) { /* Lookup */
+ if (!replies[i].valid || replies[i].op_ret == -1 ||
+ (replies[i].xdata &&
+ dict_get_sizen(replies[i].xdata, GLUSTERFS_BAD_INODE))) {
+ data_readable[i] = 0;
+ metadata_readable[i] = 0;
+ continue;
+ }
+
+ xdata = replies[i].xdata;
+ ia_type = replies[i].poststat.ia_type;
+ } else { /* pre-op xattrop */
+ xdata = local->transaction.changelog_xdata[i];
+ ia_type = inode->ia_type;
+ }
+
+ if (!xdata)
+ continue; /* mkdir_cbk sends NULL xdata_rsp. */
+ afr_accused_fill(this, xdata, data_accused,
+ (ia_type == IA_IFDIR) ? AFR_ENTRY_TRANSACTION
+ : AFR_DATA_TRANSACTION);
+
+ afr_accused_fill(this, xdata, metadata_accused,
+ AFR_METADATA_TRANSACTION);
+ }
+
+ if (replies && ia_type != IA_INVAL && ia_type != IA_IFDIR &&
+ /* We want to accuse small files only when we know for
+ * sure that there is no IO happening. Otherwise, the
+ * ia_sizes obtained in post-refresh replies may
+ * mismatch due to a race between inode-refresh and
+ * ongoing writes, causing spurious heal launches*/
+ !afr_is_possibly_under_txn(AFR_DATA_TRANSACTION, local, this)) {
+ afr_accuse_smallfiles(this, replies, data_accused);
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (data_accused[i]) {
+ data_readable[i] = 0;
+ ret = 1;
+ }
+ if (metadata_accused[i]) {
+ metadata_readable[i] = 0;
+ ret = 1;
+ }
+ }
+ return ret;
}
-
+int
+afr_replies_interpret(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ gf_boolean_t *start_heal)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ int event_generation = 0;
+ int i = 0;
+ unsigned char *data_accused = NULL;
+ unsigned char *metadata_accused = NULL;
+ unsigned char *data_readable = NULL;
+ unsigned char *metadata_readable = NULL;
+ int ret = 0;
+
+ local = frame->local;
+ priv = this->private;
+ replies = local->replies;
+ event_generation = local->event_generation;
+
+ data_accused = alloca0(priv->child_count);
+ data_readable = alloca0(priv->child_count);
+ metadata_accused = alloca0(priv->child_count);
+ metadata_readable = alloca0(priv->child_count);
+
+ ret = afr_readables_fill(frame, this, inode, data_accused, metadata_accused,
+ data_readable, metadata_readable, replies);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (start_heal && priv->child_up[i] &&
+ (data_accused[i] || metadata_accused[i])) {
+ *start_heal = _gf_true;
+ break;
+ }
+ }
+ afr_inode_read_subvol_set(inode, this, data_readable, metadata_readable,
+ event_generation);
+ return ret;
+}
int
-afr_refresh_selfheal_done (int ret, call_frame_t *heal, void *opaque)
+afr_refresh_selfheal_done(int ret, call_frame_t *heal, void *opaque)
{
- if (heal)
- STACK_DESTROY (heal->root);
- return 0;
+ if (heal)
+ AFR_STACK_DESTROY(heal);
+ return 0;
}
int
-afr_inode_refresh_err (call_frame_t *frame, xlator_t *this)
+afr_inode_refresh_err(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- int err = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int err = 0;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (local->replies[i].valid && !local->replies[i].op_ret) {
- err = 0;
- goto ret;
- }
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].valid && !local->replies[i].op_ret) {
+ err = 0;
+ goto ret;
+ }
+ }
- err = afr_final_errno (local, priv);
+ err = afr_final_errno(local, priv);
ret:
- return -err;
+ return err;
}
gf_boolean_t
-afr_selfheal_enabled (xlator_t *this)
+afr_selfheal_enabled(const xlator_t *this)
{
- afr_private_t *priv = NULL;
- gf_boolean_t data = _gf_false;
- int ret = 0;
+ const afr_private_t *priv = this->private;
- priv = this->private;
+ return priv->data_self_heal || priv->metadata_self_heal ||
+ priv->entry_self_heal;
+}
- ret = gf_string2boolean (priv->data_self_heal, &data);
- GF_ASSERT (!ret);
+int
+afr_txn_refresh_done(call_frame_t *frame, xlator_t *this, int err)
+{
+ call_frame_t *heal_frame = NULL;
+ afr_local_t *heal_local = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ inode_t *inode = NULL;
+ int event_generation = 0;
+ int read_subvol = -1;
+ int ret = 0;
+
+ local = frame->local;
+ inode = local->inode;
+ priv = this->private;
+
+ if (err)
+ goto refresh_done;
+
+ if (local->op == GF_FOP_LOOKUP)
+ goto refresh_done;
+
+ ret = afr_inode_get_readable(frame, inode, this, local->readable,
+ &event_generation, local->transaction.type);
+
+ if (ret == -EIO) {
+ /* No readable subvolume even after refresh ==> splitbrain.*/
+ if (!priv->fav_child_policy) {
+ err = EIO;
+ goto refresh_done;
+ }
+ read_subvol = afr_sh_get_fav_by_policy(this, local->replies, inode,
+ NULL);
+ if (read_subvol == -1) {
+ err = EIO;
+ goto refresh_done;
+ }
+
+ heal_frame = afr_frame_create(this, NULL);
+ if (!heal_frame) {
+ err = EIO;
+ goto refresh_done;
+ }
+ heal_local = heal_frame->local;
+ heal_local->xdata_req = dict_new();
+ if (!heal_local->xdata_req) {
+ err = EIO;
+ AFR_STACK_DESTROY(heal_frame);
+ goto refresh_done;
+ }
+ heal_local->heal_frame = frame;
+ ret = synctask_new(this->ctx->env, afr_fav_child_reset_sink_xattrs,
+ afr_fav_child_reset_sink_xattrs_cbk, heal_frame,
+ heal_frame);
+ return 0;
+ }
+
+refresh_done:
+ afr_local_replies_wipe(local, this->private);
+ local->refreshfn(frame, this, err);
- return data || priv->metadata_self_heal || priv->entry_self_heal;
+ return 0;
}
int
-afr_inode_refresh_done (call_frame_t *frame, xlator_t *this)
-{
- call_frame_t *heal_frame = NULL;
- afr_local_t *local = NULL;
- gf_boolean_t start_heal = _gf_false;
- afr_local_t *heal_local = NULL;
- int op_errno = ENOMEM;
- int ret = 0;
- int err = 0;
-
- local = frame->local;
-
- ret = afr_replies_interpret (frame, this, local->refreshinode,
- &start_heal);
-
- err = afr_inode_refresh_err (frame, this);
-
- afr_local_replies_wipe (local, this->private);
-
- if (ret && afr_selfheal_enabled (this) && start_heal) {
- heal_frame = copy_frame (frame);
- if (!heal_frame)
- goto refresh_done;
- heal_frame->root->pid = GF_CLIENT_PID_SELF_HEALD;
- heal_local = AFR_FRAME_INIT (heal_frame, op_errno);
- if (!heal_local) {
- AFR_STACK_DESTROY (heal_frame);
- goto refresh_done;
- }
- heal_local->refreshinode = inode_ref (local->refreshinode);
- heal_local->heal_frame = heal_frame;
- afr_throttled_selfheal (heal_frame, this);
- }
+afr_inode_refresh_done(call_frame_t *frame, xlator_t *this, int error)
+{
+ call_frame_t *heal_frame = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ gf_boolean_t start_heal = _gf_false;
+ afr_local_t *heal_local = NULL;
+ unsigned char *success_replies = NULL;
+ int ret = 0;
+
+ if (error != 0) {
+ goto refresh_done;
+ }
+
+ local = frame->local;
+ priv = this->private;
+ success_replies = alloca0(priv->child_count);
+ afr_fill_success_replies(local, priv, success_replies);
+
+ if (priv->thin_arbiter_count && local->is_read_txn &&
+ AFR_COUNT(success_replies, priv->child_count) != priv->child_count) {
+ /* We need to query the good bricks and/or thin-arbiter.*/
+ if (success_replies[0]) {
+ local->read_txn_query_child = AFR_CHILD_ZERO;
+ } else if (success_replies[1]) {
+ local->read_txn_query_child = AFR_CHILD_ONE;
+ }
+ error = EINVAL;
+ goto refresh_done;
+ }
+
+ if (!afr_has_quorum(success_replies, this, frame)) {
+ error = afr_final_errno(frame->local, this->private);
+ if (!error)
+ error = afr_quorum_errno(priv);
+ goto refresh_done;
+ }
+
+ ret = afr_replies_interpret(frame, this, local->refreshinode, &start_heal);
+
+ if (ret && afr_selfheal_enabled(this) && start_heal) {
+ heal_frame = afr_frame_create(this, NULL);
+ if (!heal_frame)
+ goto refresh_done;
+ heal_local = heal_frame->local;
+ heal_local->refreshinode = inode_ref(local->refreshinode);
+ heal_local->heal_frame = heal_frame;
+ if (!afr_throttled_selfheal(heal_frame, this)) {
+ AFR_STACK_DESTROY(heal_frame);
+ goto refresh_done;
+ }
+ }
refresh_done:
- local->refreshfn (frame, this, err);
+ afr_txn_refresh_done(frame, this, error);
- return 0;
+ return 0;
}
void
-afr_inode_refresh_subvol_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, struct iatt *buf,
- dict_t *xdata, struct iatt *par)
-{
- afr_local_t *local = NULL;
- int call_child = (long) cookie;
- int8_t need_heal = 1;
- int call_count = 0;
- GF_UNUSED int ret = 0;
+afr_inode_refresh_subvol_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *buf,
+ dict_t *xdata, struct iatt *par)
+{
+ afr_local_t *local = NULL;
+ int call_child = (long)cookie;
+ int8_t need_heal = 1;
+ int call_count = 0;
+ int ret = 0;
+
+ local = frame->local;
+ local->replies[call_child].valid = 1;
+ local->replies[call_child].op_ret = op_ret;
+ local->replies[call_child].op_errno = op_errno;
+ if (op_ret != -1) {
+ local->replies[call_child].poststat = *buf;
+ if (par)
+ local->replies[call_child].postparent = *par;
+ if (xdata)
+ local->replies[call_child].xdata = dict_ref(xdata);
+ }
- local = frame->local;
- local->replies[call_child].valid = 1;
- local->replies[call_child].op_ret = op_ret;
- local->replies[call_child].op_errno = op_errno;
- if (op_ret != -1) {
- local->replies[call_child].poststat = *buf;
- if (par)
- local->replies[call_child].postparent = *par;
- if (xdata)
- local->replies[call_child].xdata = dict_ref (xdata);
- }
- if (xdata) {
- ret = dict_get_int8 (xdata, "link-count", &need_heal);
- local->replies[call_child].need_heal = need_heal;
- } else {
- local->replies[call_child].need_heal = need_heal;
+ if (xdata) {
+ ret = dict_get_int8(xdata, "link-count", &need_heal);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to get link count");
}
+ }
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- afr_set_need_heal (this, local);
- afr_inode_refresh_done (frame, this);
+ local->replies[call_child].need_heal = need_heal;
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ afr_set_need_heal(this, local);
+ ret = afr_inode_refresh_err(frame, this);
+ if (ret) {
+ gf_msg_debug(this->name, ret, "afr_inode_refresh_err failed");
}
-
+ afr_inode_refresh_done(frame, this, ret);
+ }
}
int
-afr_inode_refresh_subvol_with_lookup_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int op_ret,
- int op_errno, inode_t *inode,
- struct iatt *buf, dict_t *xdata,
- struct iatt *par)
+afr_inode_refresh_subvol_with_lookup_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int op_ret,
+ int op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata,
+ struct iatt *par)
{
- afr_inode_refresh_subvol_cbk (frame, cookie, this, op_ret, op_errno,
- buf, xdata, par);
- return 0;
+ afr_inode_refresh_subvol_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ xdata, par);
+ return 0;
}
-
int
-afr_inode_refresh_subvol_with_lookup (call_frame_t *frame, xlator_t *this,
- int i, inode_t *inode, dict_t *xdata)
+afr_inode_refresh_subvol_with_lookup(call_frame_t *frame, xlator_t *this, int i,
+ inode_t *inode, uuid_t gfid, dict_t *xdata)
{
- loc_t loc = {0, };
- afr_private_t *priv = NULL;
+ loc_t loc = {
+ 0,
+ };
+ afr_private_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- loc.inode = inode;
- gf_uuid_copy (loc.gfid, inode->gfid);
+ loc.inode = inode;
+ if (gf_uuid_is_null(inode->gfid) && gfid) {
+ /* Handle setattr/setxattr on a not-yet-linked inode coming
+ * from dht. */
+ gf_uuid_copy(loc.gfid, gfid);
+ } else {
+ gf_uuid_copy(loc.gfid, inode->gfid);
+ }
- STACK_WIND_COOKIE (frame, afr_inode_refresh_subvol_with_lookup_cbk,
- (void *) (long) i, priv->children[i],
- priv->children[i]->fops->lookup, &loc, xdata);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_inode_refresh_subvol_with_lookup_cbk,
+ (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->lookup, &loc, xdata);
+ return 0;
}
int
-afr_inode_refresh_subvol_with_fstat_cbk (call_frame_t *frame,
- void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno,
- struct iatt *buf, dict_t *xdata)
+afr_inode_refresh_subvol_with_fstat_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
{
- afr_inode_refresh_subvol_cbk (frame, cookie, this, op_ret, op_errno,
- buf, xdata, NULL);
- return 0;
+ afr_inode_refresh_subvol_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ xdata, NULL);
+ return 0;
}
int
-afr_inode_refresh_subvol_with_fstat (call_frame_t *frame, xlator_t *this, int i,
- dict_t *xdata)
+afr_inode_refresh_subvol_with_fstat(call_frame_t *frame, xlator_t *this, int i,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
- priv = this->private;
- local = frame->local;
+ priv = this->private;
+ local = frame->local;
- STACK_WIND_COOKIE (frame, afr_inode_refresh_subvol_with_fstat_cbk,
- (void *) (long) i, priv->children[i],
- priv->children[i]->fops->fstat, local->fd, xdata);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_inode_refresh_subvol_with_fstat_cbk,
+ (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->fstat, local->fd, xdata);
+ return 0;
}
int
-afr_inode_refresh_do (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = 0;
- int i = 0;
- int ret = 0;
- dict_t *xdata = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
- unsigned char *wind_subvols = NULL;
-
- priv = this->private;
- local = frame->local;
- wind_subvols = alloca0 (priv->child_count);
-
- afr_local_replies_wipe (local, priv);
-
- if (local->fd) {
- fd_ctx = afr_fd_ctx_get (local->fd, this);
- if (!fd_ctx) {
- afr_inode_refresh_done (frame, this);
- return 0;
- }
- }
+afr_inode_refresh_do(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+ int i = 0;
+ int ret = 0;
+ dict_t *xdata = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ unsigned char *wind_subvols = NULL;
- xdata = dict_new ();
- if (!xdata) {
- afr_inode_refresh_done (frame, this);
- return 0;
- }
+ priv = this->private;
+ local = frame->local;
+ wind_subvols = alloca0(priv->child_count);
- if (afr_xattr_req_prepare (this, xdata) != 0) {
- dict_unref (xdata);
- afr_inode_refresh_done (frame, this);
- return 0;
- }
+ afr_local_replies_wipe(local, priv);
- ret = dict_set_str (xdata, "link-count", GF_XATTROP_INDEX_COUNT);
- if (ret) {
- gf_msg_debug (this->name, -ret,
- "Unable to set link-count in dict ");
+ if (local->fd) {
+ fd_ctx = afr_fd_ctx_get(local->fd, this);
+ if (!fd_ctx) {
+ afr_inode_refresh_done(frame, this, EINVAL);
+ return 0;
}
+ }
- ret = dict_set_str (xdata, GLUSTERFS_INODELK_DOM_COUNT, this->name);
- if (ret) {
- gf_msg_debug (this->name, -ret,
- "Unable to set inodelk-dom-count in dict ");
+ xdata = dict_new();
+ if (!xdata) {
+ afr_inode_refresh_done(frame, this, ENOMEM);
+ return 0;
+ }
- }
+ ret = afr_xattr_req_prepare(this, xdata);
+ if (ret != 0) {
+ dict_unref(xdata);
+ afr_inode_refresh_done(frame, this, -ret);
+ return 0;
+ }
- if (local->fd) {
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i] &&
- fd_ctx->opened_on[i] == AFR_FD_OPENED)
- wind_subvols[i] = 1;
- }
- } else {
- memcpy (wind_subvols, local->child_up,
- sizeof (*local->child_up) * priv->child_count);
- }
+ ret = dict_set_sizen_str_sizen(xdata, "link-count", GF_XATTROP_INDEX_COUNT);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
+ }
- local->call_count = AFR_COUNT (wind_subvols, priv->child_count);
+ ret = dict_set_str_sizen(xdata, GLUSTERFS_INODELK_DOM_COUNT, this->name);
+ if (ret) {
+ gf_msg_debug(this->name, -ret,
+ "Unable to set inodelk-dom-count in dict ");
+ }
- call_count = local->call_count;
- if (!call_count) {
- dict_unref (xdata);
- afr_inode_refresh_done (frame, this);
- return 0;
- }
- for (i = 0; i < priv->child_count; i++) {
- if (!wind_subvols[i])
- continue;
+ if (local->fd) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i] && fd_ctx->opened_on[i] == AFR_FD_OPENED)
+ wind_subvols[i] = 1;
+ }
+ } else {
+ memcpy(wind_subvols, local->child_up,
+ sizeof(*local->child_up) * priv->child_count);
+ }
+
+ local->call_count = AFR_COUNT(wind_subvols, priv->child_count);
+
+ call_count = local->call_count;
+ if (!call_count) {
+ dict_unref(xdata);
+ if (local->fd && AFR_COUNT(local->child_up, priv->child_count))
+ afr_inode_refresh_done(frame, this, EBADFD);
+ else
+ afr_inode_refresh_done(frame, this, ENOTCONN);
+ return 0;
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!wind_subvols[i])
+ continue;
- if (local->fd)
- afr_inode_refresh_subvol_with_fstat (frame, this, i,
- xdata);
- else
- afr_inode_refresh_subvol_with_lookup (frame, this, i,
- local->refreshinode, xdata);
+ if (local->fd)
+ afr_inode_refresh_subvol_with_fstat(frame, this, i, xdata);
+ else
+ afr_inode_refresh_subvol_with_lookup(
+ frame, this, i, local->refreshinode, local->refreshgfid, xdata);
- if (!--call_count)
- break;
- }
+ if (!--call_count)
+ break;
+ }
- dict_unref (xdata);
+ dict_unref(xdata);
- return 0;
+ return 0;
}
-
int
-afr_inode_refresh (call_frame_t *frame, xlator_t *this, inode_t *inode,
- afr_inode_refresh_cbk_t refreshfn)
+afr_inode_refresh(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ uuid_t gfid, afr_inode_refresh_cbk_t refreshfn)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- local->refreshfn = refreshfn;
+ local->refreshfn = refreshfn;
- if (local->refreshinode) {
- inode_unref (local->refreshinode);
- local->refreshinode = NULL;
- }
+ if (local->refreshinode) {
+ inode_unref(local->refreshinode);
+ local->refreshinode = NULL;
+ }
- local->refreshinode = inode_ref (inode);
+ local->refreshinode = inode_ref(inode);
- afr_inode_refresh_do (frame, this);
+ if (gfid)
+ gf_uuid_copy(local->refreshgfid, gfid);
+ else
+ gf_uuid_clear(local->refreshgfid);
- return 0;
-}
+ afr_inode_refresh_do(frame, this);
+ return 0;
+}
int
-afr_xattr_req_prepare (xlator_t *this, dict_t *xattr_req)
+afr_xattr_req_prepare(xlator_t *this, dict_t *xattr_req)
{
- int i = 0;
- afr_private_t *priv = NULL;
- int ret = 0;
-
- priv = this->private;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ int ret = 0;
- for (i = 0; i < priv->child_count; i++) {
- ret = dict_set_uint64 (xattr_req, priv->pending_key[i],
- AFR_NUM_CHANGE_LOGS * sizeof(int));
- if (ret < 0)
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Unable to set dict value for %s",
- priv->pending_key[i]);
- /* 3 = data+metadata+entry */
- }
- ret = dict_set_uint64 (xattr_req, AFR_DIRTY,
- AFR_NUM_CHANGE_LOGS * sizeof(int));
- if (ret) {
- gf_msg_debug (this->name, -ret, "failed to set dirty "
- "query flag");
- }
+ priv = this->private;
- ret = dict_set_int32 (xattr_req, "list-xattr", 1);
- if (ret) {
- gf_msg_debug (this->name, -ret,
- "Unable to set list-xattr in dict ");
- }
+ for (i = 0; i < priv->child_count; i++) {
+ ret = dict_set_uint64(xattr_req, priv->pending_key[i],
+ AFR_NUM_CHANGE_LOGS * sizeof(int));
+ if (ret < 0)
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Unable to set dict value for %s", priv->pending_key[i]);
+ /* 3 = data+metadata+entry */
+ }
+ ret = dict_set_uint64(xattr_req, AFR_DIRTY,
+ AFR_NUM_CHANGE_LOGS * sizeof(int));
+ if (ret) {
+ gf_msg_debug(this->name, -ret,
+ "failed to set dirty "
+ "query flag");
+ }
+
+ ret = dict_set_int32_sizen(xattr_req, "list-xattr", 1);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set list-xattr in dict ");
+ }
+
+ return ret;
+}
- return ret;
+int
+afr_lookup_xattr_req_prepare(afr_local_t *local, xlator_t *this,
+ dict_t *xattr_req, loc_t *loc)
+{
+ int ret = -ENOMEM;
+
+ if (!local->xattr_req)
+ local->xattr_req = dict_new();
+
+ if (!local->xattr_req)
+ goto out;
+
+ if (xattr_req && (xattr_req != local->xattr_req))
+ dict_copy(xattr_req, local->xattr_req);
+
+ ret = afr_xattr_req_prepare(this, local->xattr_req);
+
+ ret = dict_set_uint64(local->xattr_req, GLUSTERFS_INODELK_COUNT, 0);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Unable to set dict value for %s", loc->path,
+ GLUSTERFS_INODELK_COUNT);
+ }
+ ret = dict_set_uint64(local->xattr_req, GLUSTERFS_ENTRYLK_COUNT, 0);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Unable to set dict value for %s", loc->path,
+ GLUSTERFS_ENTRYLK_COUNT);
+ }
+
+ ret = dict_set_uint32(local->xattr_req, GLUSTERFS_PARENT_ENTRYLK, 0);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Unable to set dict value for %s", loc->path,
+ GLUSTERFS_PARENT_ENTRYLK);
+ }
+
+ ret = dict_set_sizen_str_sizen(local->xattr_req, "link-count",
+ GF_XATTROP_INDEX_COUNT);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
+ }
+
+ ret = 0;
+out:
+ return ret;
}
int
-afr_lookup_xattr_req_prepare (afr_local_t *local, xlator_t *this,
- dict_t *xattr_req, loc_t *loc)
+afr_least_pending_reads_child(afr_private_t *priv, unsigned char *readable)
{
- int ret = -ENOMEM;
-
- if (!local->xattr_req)
- local->xattr_req = dict_new ();
+ int i = 0;
+ int child = -1;
+ int64_t read_iter = -1;
+ int64_t pending_read = -1;
- if (!local->xattr_req)
- goto out;
-
- if (xattr_req && (xattr_req != local->xattr_req))
- dict_copy (xattr_req, local->xattr_req);
+ for (i = 0; i < priv->child_count; i++) {
+ if (AFR_IS_ARBITER_BRICK(priv, i) || !readable[i])
+ continue;
+ read_iter = GF_ATOMIC_GET(priv->pending_reads[i]);
+ if (child == -1 || read_iter < pending_read) {
+ pending_read = read_iter;
+ child = i;
+ }
+ }
- ret = afr_xattr_req_prepare (this, local->xattr_req);
+ return child;
+}
- ret = dict_set_uint64 (local->xattr_req, GLUSTERFS_INODELK_COUNT, 0);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "%s: Unable to set dict value for %s",
- loc->path, GLUSTERFS_INODELK_COUNT);
- }
- ret = dict_set_uint64 (local->xattr_req, GLUSTERFS_ENTRYLK_COUNT, 0);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "%s: Unable to set dict value for %s",
- loc->path, GLUSTERFS_ENTRYLK_COUNT);
- }
+static int32_t
+afr_least_latency_child(afr_private_t *priv, unsigned char *readable)
+{
+ int32_t i = 0;
+ int child = -1;
- ret = dict_set_uint32 (local->xattr_req, GLUSTERFS_PARENT_ENTRYLK, 0);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "%s: Unable to set dict value for %s",
- loc->path, GLUSTERFS_PARENT_ENTRYLK);
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (AFR_IS_ARBITER_BRICK(priv, i) || !readable[i] ||
+ priv->child_latency[i] < 0)
+ continue;
- ret = dict_set_str (xattr_req, "link-count", GF_XATTROP_INDEX_COUNT);
- if (ret) {
- gf_msg_debug (this->name, -ret,
- "Unable to set link-count in dict ");
+ if (child == -1 ||
+ priv->child_latency[i] < priv->child_latency[child]) {
+ child = i;
}
-
- ret = 0;
-out:
- return ret;
+ }
+ return child;
}
-
-int
-afr_hash_child (afr_read_subvol_args_t *args, int32_t child_count, int hashmode)
+static int32_t
+afr_least_latency_times_pending_reads_child(afr_private_t *priv,
+ unsigned char *readable)
{
- uuid_t gfid_copy = {0,};
- pid_t pid;
+ int32_t i = 0;
+ int child = -1;
+ int64_t pending_read = 0;
+ int64_t latency = -1;
+ int64_t least_latency = -1;
- if (!hashmode) {
- return -1;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (AFR_IS_ARBITER_BRICK(priv, i) || !readable[i] ||
+ priv->child_latency[i] < 0)
+ continue;
- gf_uuid_copy (gfid_copy, args->gfid);
+ pending_read = GF_ATOMIC_GET(priv->pending_reads[i]);
+ latency = (pending_read + 1) * priv->child_latency[i];
- if ((hashmode > 1) && (args->ia_type != IA_IFDIR)) {
+ if (child == -1 || latency < least_latency) {
+ least_latency = latency;
+ child = i;
+ }
+ }
+ return child;
+}
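The three load-aware read policies added above boil down to simple arithmetic over per-child counters: least-load picks the readable child with the fewest pending reads, least-latency the one with the lowest measured latency, and the hybrid policy minimizes latency * (pending reads + 1). Below is a minimal standalone sketch of the hybrid selection only; the `readable`, `child_latency` and `pending_reads` arrays are stand-ins for the corresponding `afr_private_t` fields, and arbiter filtering is folded into `readable`.

```
#include <stdint.h>
#include <stdio.h>

/* Sketch: pick the child that minimises latency * (pending_reads + 1),
 * skipping children that are not readable or have no latency sample. */
static int
pick_hybrid_child(int child_count, const unsigned char *readable,
                  const int64_t *child_latency, const int64_t *pending_reads)
{
    int child = -1;
    int64_t best = -1;

    for (int i = 0; i < child_count; i++) {
        if (!readable[i] || child_latency[i] < 0)
            continue;
        int64_t score = (pending_reads[i] + 1) * child_latency[i];
        if (child == -1 || score < best) {
            best = score;
            child = i;
        }
    }
    return child;
}

int
main(void)
{
    unsigned char readable[3] = {1, 1, 1};
    int64_t latency[3] = {4, 10, 3}; /* e.g. microseconds */
    int64_t pending[3] = {5, 0, 9};

    /* scores: 24, 10, 30 -> child 1 wins despite its higher raw latency */
    printf("selected child: %d\n",
           pick_hybrid_child(3, readable, latency, pending));
    return 0;
}
```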
+
+int
+afr_hash_child(afr_read_subvol_args_t *args, afr_private_t *priv,
+ unsigned char *readable)
+{
+ uuid_t gfid_copy = {
+ 0,
+ };
+ pid_t pid;
+ int child = -1;
+
+ switch (priv->hash_mode) {
+ case AFR_READ_POLICY_FIRST_UP:
+ break;
+ case AFR_READ_POLICY_GFID_HASH:
+ gf_uuid_copy(gfid_copy, args->gfid);
+ child = SuperFastHash((char *)gfid_copy, sizeof(gfid_copy)) %
+ priv->child_count;
+ break;
+ case AFR_READ_POLICY_GFID_PID_HASH:
+ if (args->ia_type != IA_IFDIR) {
/*
* Why getpid? Because it's one of the cheapest calls
- * available - faster than gethostname etc. - and returns a
- * constant-length value that's sure to be shorter than a UUID.
- * It's still very unlikely to be the same across clients, so
- * it still provides good mixing. We're not trying for
- * perfection here. All we need is a low probability that
- * multiple clients won't converge on the same subvolume.
+ * available - faster than gethostname etc. - and
+ * returns a constant-length value that's sure to be
+ * shorter than a UUID. It's still very unlikely to be
+ * the same across clients, so it still provides good
+ * mixing. We're not trying for perfection here. All we
+ * need is a low probability of multiple clients
+ * converging on the same subvolume.
*/
+ gf_uuid_copy(gfid_copy, args->gfid);
pid = getpid();
- memcpy (gfid_copy, &pid, sizeof(pid));
- }
-
- return SuperFastHash((char *)gfid_copy,
- sizeof(gfid_copy)) % child_count;
+ *(pid_t *)gfid_copy ^= pid;
+ }
+ child = SuperFastHash((char *)gfid_copy, sizeof(gfid_copy)) %
+ priv->child_count;
+ break;
+ case AFR_READ_POLICY_LESS_LOAD:
+ child = afr_least_pending_reads_child(priv, readable);
+ break;
+ case AFR_READ_POLICY_LEAST_LATENCY:
+ child = afr_least_latency_child(priv, readable);
+ break;
+ case AFR_READ_POLICY_LOAD_LATENCY_HYBRID:
+ child = afr_least_latency_times_pending_reads_child(priv, readable);
+ break;
+ }
+
+ return child;
}
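For the hash-based policies, afr_hash_child() maps the file's GFID (optionally mixed with the client PID for non-directories) onto a child index. The sketch below illustrates that idea outside the xlator; it replaces GlusterFS's SuperFastHash() with a plain FNV-1a stand-in and uses a made-up 16-byte GFID, so only the selection logic, not the exact hash values, matches the hunk above.

```
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Stand-in hash: FNV-1a over the 16-byte GFID. Only the "deterministic
 * hash of the GFID" property matters for this sketch. */
static uint32_t
fnv1a(const unsigned char *buf, size_t len)
{
    uint32_t h = 2166136261u;
    for (size_t i = 0; i < len; i++) {
        h ^= buf[i];
        h *= 16777619u;
    }
    return h;
}

static int
hash_child(const unsigned char gfid[16], int child_count, int mix_in_pid)
{
    unsigned char copy[16];

    memcpy(copy, gfid, sizeof(copy));
    if (mix_in_pid) {
        /* XOR the client PID into the leading bytes, as the
         * GFID_PID_HASH case above does, so different clients tend to
         * spread across children for the same file. */
        pid_t pid = getpid();
        for (size_t i = 0; i < sizeof(pid) && i < sizeof(copy); i++)
            copy[i] ^= (unsigned char)(pid >> (8 * i));
    }
    return (int)(fnv1a(copy, sizeof(copy)) % (uint32_t)child_count);
}

int
main(void)
{
    unsigned char gfid[16] = {0xde, 0xad, 0xbe, 0xef}; /* made-up GFID */

    printf("gfid-hash child: %d\n", hash_child(gfid, 3, 0));
    printf("gfid+pid-hash child: %d\n", hash_child(gfid, 3, 1));
    return 0;
}
```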
-
int
-afr_read_subvol_select_by_policy (inode_t *inode, xlator_t *this,
- unsigned char *readable,
- afr_read_subvol_args_t *args)
+afr_read_subvol_select_by_policy(inode_t *inode, xlator_t *this,
+ unsigned char *readable,
+ afr_read_subvol_args_t *args)
{
- int i = 0;
- int read_subvol = -1;
- afr_private_t *priv = NULL;
- afr_read_subvol_args_t local_args = {0,};
+ int i = 0;
+ int read_subvol = -1;
+ afr_private_t *priv = NULL;
+ afr_read_subvol_args_t local_args = {
+ 0,
+ };
- priv = this->private;
+ priv = this->private;
- /* first preference - explicitly specified or local subvolume */
- if (priv->read_child >= 0 && readable[priv->read_child])
- return priv->read_child;
+ /* first preference - explicitly specified or local subvolume */
+ if (priv->read_child >= 0 && readable[priv->read_child])
+ return priv->read_child;
- if (inode_is_linked (inode)) {
- gf_uuid_copy (local_args.gfid, inode->gfid);
- local_args.ia_type = inode->ia_type;
- } else if (args) {
- local_args = *args;
- }
+ if (inode_is_linked(inode)) {
+ gf_uuid_copy(local_args.gfid, inode->gfid);
+ local_args.ia_type = inode->ia_type;
+ } else if (args) {
+ local_args = *args;
+ }
- /* second preference - use hashed mode */
- read_subvol = afr_hash_child (&local_args, priv->child_count,
- priv->hash_mode);
- if (read_subvol >= 0 && readable[read_subvol])
- return read_subvol;
+ /* second preference - use hashed mode */
+ read_subvol = afr_hash_child(&local_args, priv, readable);
+ if (read_subvol >= 0 && readable[read_subvol])
+ return read_subvol;
- for (i = 0; i < priv->child_count; i++) {
- if (readable[i])
- return i;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (readable[i])
+ return i;
+ }
- /* no readable subvolumes, either split brain or all subvols down */
+ /* no readable subvolumes, either split brain or all subvols down */
- return -1;
+ return -1;
}
-
int
-afr_inode_read_subvol_type_get (inode_t *inode, xlator_t *this,
- unsigned char *readable, int *event_p,
- int type)
+afr_inode_read_subvol_type_get(inode_t *inode, xlator_t *this,
+ unsigned char *readable, int *event_p, int type)
{
- int ret = -1;
+ int ret = -1;
- if (type == AFR_METADATA_TRANSACTION)
- ret = afr_inode_read_subvol_get (inode, this, 0, readable,
- event_p);
- else
- ret = afr_inode_read_subvol_get (inode, this, readable, 0,
- event_p);
- return ret;
+ if (type == AFR_METADATA_TRANSACTION)
+ ret = afr_inode_read_subvol_get(inode, this, 0, readable, event_p);
+ else
+ ret = afr_inode_read_subvol_get(inode, this, readable, 0, event_p);
+ return ret;
}
+void
+afr_readables_intersect_get(inode_t *inode, xlator_t *this, int *event,
+ unsigned char *intersection)
+{
+ afr_private_t *priv = NULL;
+ unsigned char *data_readable = NULL;
+ unsigned char *metadata_readable = NULL;
+ unsigned char *intersect = NULL;
+
+ priv = this->private;
+ data_readable = alloca0(priv->child_count);
+ metadata_readable = alloca0(priv->child_count);
+ intersect = alloca0(priv->child_count);
+
+ afr_inode_read_subvol_get(inode, this, data_readable, metadata_readable,
+ event);
+
+ AFR_INTERSECT(intersect, data_readable, metadata_readable,
+ priv->child_count);
+ if (intersection)
+ memcpy(intersection, intersect,
+ sizeof(*intersection) * priv->child_count);
+}
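afr_readables_intersect_get() builds the set of children that are readable for both data and metadata; AFR_INTERSECT and AFR_COUNT amount to an element-wise AND and a popcount over per-child boolean maps. A small self-contained illustration of that, with plain helper functions standing in for the macros, follows.

```
#include <stdio.h>

/* Element-wise AND of two per-child boolean maps, the operation
 * AFR_INTERSECT performs in the hunk above. */
static void
intersect(unsigned char *out, const unsigned char *a, const unsigned char *b,
          int count)
{
    for (int i = 0; i < count; i++)
        out[i] = a[i] && b[i];
}

/* Number of set entries, what AFR_COUNT reports. */
static int
count_set(const unsigned char *map, int count)
{
    int n = 0;
    for (int i = 0; i < count; i++)
        n += !!map[i];
    return n;
}

int
main(void)
{
    /* child 0 is data-readable only, child 2 metadata-readable only,
     * so only child 1 survives the intersection. */
    unsigned char data_readable[3] = {1, 1, 0};
    unsigned char metadata_readable[3] = {0, 1, 1};
    unsigned char both[3];

    intersect(both, data_readable, metadata_readable, 3);
    printf("fully readable children: %d\n", count_set(both, 3));
    return 0;
}
```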
int
-afr_read_subvol_get (inode_t *inode, xlator_t *this, int *subvol_p,
- int *event_p, afr_transaction_type type,
- afr_read_subvol_args_t *args)
+afr_read_subvol_get(inode_t *inode, xlator_t *this, int *subvol_p,
+ unsigned char *readables, int *event_p,
+ afr_transaction_type type, afr_read_subvol_args_t *args)
{
- afr_private_t *priv = NULL;
- unsigned char *data_readable = NULL;
- unsigned char *metadata_readable = NULL;
- unsigned char *readable = NULL;
- unsigned char *intersection = NULL;
- int subvol = -1;
- int event = 0;
-
- priv = this->private;
+ afr_private_t *priv = NULL;
+ unsigned char *readable = NULL;
+ unsigned char *intersection = NULL;
+ int subvol = -1;
+ int event = 0;
- readable = alloca0 (priv->child_count);
- data_readable = alloca0 (priv->child_count);
- metadata_readable = alloca0 (priv->child_count);
- intersection = alloca0 (priv->child_count);
+ priv = this->private;
- afr_inode_read_subvol_type_get (inode, this, readable, &event, type);
+ readable = alloca0(priv->child_count);
+ intersection = alloca0(priv->child_count);
- afr_inode_read_subvol_get (inode, this, data_readable, metadata_readable,
- &event);
+ afr_inode_read_subvol_type_get(inode, this, readable, &event, type);
- AFR_INTERSECT (intersection, data_readable, metadata_readable,
- priv->child_count);
+ afr_readables_intersect_get(inode, this, &event, intersection);
- if (AFR_COUNT (intersection, priv->child_count) > 0)
- subvol = afr_read_subvol_select_by_policy (inode, this,
- intersection, args);
- else
- subvol = afr_read_subvol_select_by_policy (inode, this,
- readable, args);
- if (subvol_p)
- *subvol_p = subvol;
- if (event_p)
- *event_p = event;
- return subvol;
+ if (AFR_COUNT(intersection, priv->child_count) > 0)
+ subvol = afr_read_subvol_select_by_policy(inode, this, intersection,
+ args);
+ else
+ subvol = afr_read_subvol_select_by_policy(inode, this, readable, args);
+ if (subvol_p)
+ *subvol_p = subvol;
+ if (event_p)
+ *event_p = event;
+ if (readables)
+ memcpy(readables, readable, sizeof(*readables) * priv->child_count);
+ return subvol;
}
-
void
-afr_local_transaction_cleanup (afr_local_t *local, xlator_t *this)
+afr_local_transaction_cleanup(afr_local_t *local, xlator_t *this)
{
- afr_private_t *priv = NULL;
- int i = 0;
+ afr_private_t *priv = NULL;
+ int i = 0;
- priv = this->private;
+ priv = this->private;
- afr_matrix_cleanup (local->pending, priv->child_count);
-
- GF_FREE (local->internal_lock.locked_nodes);
-
- for (i = 0; local->internal_lock.inodelk[i].domain; i++) {
- GF_FREE (local->internal_lock.inodelk[i].locked_nodes);
- }
+ afr_matrix_cleanup(local->pending, priv->child_count);
- GF_FREE (local->internal_lock.lower_locked_nodes);
+ GF_FREE(local->internal_lock.lower_locked_nodes);
- afr_entry_lockee_cleanup (&local->internal_lock);
+ afr_lockees_cleanup(&local->internal_lock);
- GF_FREE (local->transaction.pre_op);
+ GF_FREE(local->transaction.pre_op);
- GF_FREE (local->transaction.pre_op_sources);
- if (local->transaction.pre_op_xdata) {
- for (i = 0; i < priv->child_count; i++) {
- if (!local->transaction.pre_op_xdata[i])
- continue;
- dict_unref (local->transaction.pre_op_xdata[i]);
- }
- GF_FREE (local->transaction.pre_op_xdata);
+ GF_FREE(local->transaction.pre_op_sources);
+ if (local->transaction.changelog_xdata) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.changelog_xdata[i])
+ continue;
+ dict_unref(local->transaction.changelog_xdata[i]);
}
+ GF_FREE(local->transaction.changelog_xdata);
+ }
- GF_FREE (local->transaction.eager_lock);
- GF_FREE (local->transaction.failed_subvols);
-
- GF_FREE (local->transaction.basename);
- GF_FREE (local->transaction.new_basename);
+ GF_FREE(local->transaction.failed_subvols);
- loc_wipe (&local->transaction.parent_loc);
- loc_wipe (&local->transaction.new_parent_loc);
+ GF_FREE(local->transaction.basename);
+ GF_FREE(local->transaction.new_basename);
+ loc_wipe(&local->transaction.parent_loc);
+ loc_wipe(&local->transaction.new_parent_loc);
}
-
void
-afr_replies_wipe (struct afr_reply *replies, int count)
+afr_reply_wipe(struct afr_reply *reply)
{
- int i = 0;
+ if (reply->xdata) {
+ dict_unref(reply->xdata);
+ reply->xdata = NULL;
+ }
- for (i = 0; i < count; i++) {
- if (replies[i].xdata) {
- dict_unref (replies[i].xdata);
- replies[i].xdata = NULL;
- }
+ if (reply->xattr) {
+ dict_unref(reply->xattr);
+ reply->xattr = NULL;
+ }
+}
- if (replies[i].xattr) {
- dict_unref (replies[i].xattr);
- replies[i].xattr = NULL;
- }
- }
+void
+afr_replies_wipe(struct afr_reply *replies, int count)
+{
+ int i = 0;
+
+ for (i = 0; i < count; i++) {
+ afr_reply_wipe(&replies[i]);
+ }
}
void
-afr_local_replies_wipe (afr_local_t *local, afr_private_t *priv)
+afr_local_replies_wipe(afr_local_t *local, afr_private_t *priv)
{
+ if (!local->replies)
+ return;
- if (!local->replies)
- return;
+ afr_replies_wipe(local->replies, priv->child_count);
- afr_replies_wipe (local->replies, priv->child_count);
+ memset(local->replies, 0, sizeof(*local->replies) * priv->child_count);
+}
- memset (local->replies, 0, sizeof(*local->replies) * priv->child_count);
+static gf_boolean_t
+afr_fop_lock_is_unlock(call_frame_t *frame)
+{
+ afr_local_t *local = frame->local;
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ if ((F_UNLCK == local->cont.inodelk.in_flock.l_type) &&
+ (local->cont.inodelk.in_cmd == F_SETLKW ||
+ local->cont.inodelk.in_cmd == F_SETLK))
+ return _gf_true;
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ if (ENTRYLK_UNLOCK == local->cont.entrylk.in_cmd)
+ return _gf_true;
+ break;
+ default:
+ return _gf_false;
+ }
+ return _gf_false;
}
-void
-afr_remove_eager_lock_stub (afr_local_t *local)
-{
- LOCK (&local->fd->lock);
- {
- list_del_init (&local->transaction.eager_locked);
- }
- UNLOCK (&local->fd->lock);
+static gf_boolean_t
+afr_lk_is_unlock(int32_t cmd, struct gf_flock *flock)
+{
+ switch (cmd) {
+ case F_RESLK_UNLCK:
+ return _gf_true;
+ break;
+
+#if F_SETLKW != F_SETLKW64
+ case F_SETLKW64:
+#endif
+ case F_SETLKW:
+
+#if F_SETLK != F_SETLK64
+ case F_SETLK64:
+#endif
+ case F_SETLK:
+ if (F_UNLCK == flock->l_type)
+ return _gf_true;
+ break;
+ default:
+ return _gf_false;
+ }
+ return _gf_false;
}
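afr_lk_is_unlock() simply classifies a lock request as a release when a SETLK/SETLKW carries F_UNLCK. The sketch below shows the same check using the standard struct flock as a stand-in for GlusterFS's gf_flock, and covers only the POSIX commands (F_RESLK_UNLCK and the 64-bit variants are omitted).

```
#include <fcntl.h>
#include <stdio.h>

/* A SETLK/SETLKW whose lock type is F_UNLCK releases a lock rather
 * than taking one; everything else is treated as a lock request. */
static int
lk_is_unlock(int cmd, const struct flock *fl)
{
    switch (cmd) {
    case F_SETLK:
    case F_SETLKW:
        return fl->l_type == F_UNLCK;
    default:
        return 0;
    }
}

int
main(void)
{
    struct flock fl = {.l_type = F_UNLCK, .l_whence = SEEK_SET};

    printf("unlock? %d\n", lk_is_unlock(F_SETLK, &fl)); /* prints 1 */
    return 0;
}
```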
void
-afr_local_cleanup (afr_local_t *local, xlator_t *this)
+afr_handle_inconsistent_fop(call_frame_t *frame, int32_t *op_ret,
+ int32_t *op_errno)
{
- afr_private_t * priv = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
- if (!local)
- return;
+ if (!frame || !frame->this || !frame->local || !frame->this->private)
+ return;
- syncbarrier_destroy (&local->barrier);
+ if (*op_ret < 0)
+ return;
- if (local->transaction.eager_lock_on &&
- !list_empty (&local->transaction.eager_locked))
- afr_remove_eager_lock_stub (local);
+ /* Failing inodelk/entrylk/lk here is not a good idea: if we fail
+ * the fop at this point, we would also need to clean up the locks
+ * already taken on the other bricks. The brick may go down right
+ * after the unwind anyway, in which case the next fop fails, so
+ * leave the behaviour as it is for now. */
+ local = frame->local;
+ switch (local->op) {
+ case GF_FOP_LOOKUP:
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ case GF_FOP_LK:
+ return;
+ default:
+ break;
+ }
- afr_local_transaction_cleanup (local, this);
+ priv = frame->this->private;
+ if (!priv->consistent_io)
+ return;
- priv = this->private;
+ if (local->event_generation &&
+ (local->event_generation != priv->event_generation))
+ goto inconsistent;
- loc_wipe (&local->loc);
- loc_wipe (&local->newloc);
+ return;
+inconsistent:
+ *op_ret = -1;
+ *op_errno = ENOTCONN;
+}
- if (local->fd)
- fd_unref (local->fd);
+void
+afr_local_cleanup(afr_local_t *local, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
- if (local->xattr_req)
- dict_unref (local->xattr_req);
+ if (!local)
+ return;
- if (local->xattr_rsp)
- dict_unref (local->xattr_rsp);
+ syncbarrier_destroy(&local->barrier);
- if (local->dict)
- dict_unref (local->dict);
+ afr_local_transaction_cleanup(local, this);
- afr_local_replies_wipe (local, priv);
- GF_FREE(local->replies);
+ priv = this->private;
- GF_FREE (local->child_up);
+ loc_wipe(&local->loc);
+ loc_wipe(&local->newloc);
- GF_FREE (local->read_attempted);
+ if (local->fd)
+ fd_unref(local->fd);
- GF_FREE (local->readable);
+ if (local->xattr_req)
+ dict_unref(local->xattr_req);
- if (local->inode)
- inode_unref (local->inode);
+ if (local->xattr_rsp)
+ dict_unref(local->xattr_rsp);
- if (local->parent)
- inode_unref (local->parent);
+ if (local->dict)
+ dict_unref(local->dict);
- if (local->parent2)
- inode_unref (local->parent2);
+ afr_local_replies_wipe(local, priv);
+ GF_FREE(local->replies);
- if (local->refreshinode)
- inode_unref (local->refreshinode);
+ GF_FREE(local->child_up);
- { /* getxattr */
- GF_FREE (local->cont.getxattr.name);
- }
+ GF_FREE(local->read_attempted);
+
+ GF_FREE(local->readable);
+ GF_FREE(local->readable2);
+
+ if (local->inode)
+ inode_unref(local->inode);
- { /* lk */
- GF_FREE (local->cont.lk.locked_nodes);
- }
+ if (local->parent)
+ inode_unref(local->parent);
- { /* create */
- if (local->cont.create.fd)
- fd_unref (local->cont.create.fd);
- if (local->cont.create.params)
- dict_unref (local->cont.create.params);
- }
+ if (local->parent2)
+ inode_unref(local->parent2);
- { /* mknod */
- if (local->cont.mknod.params)
- dict_unref (local->cont.mknod.params);
- }
+ if (local->refreshinode)
+ inode_unref(local->refreshinode);
- { /* mkdir */
- if (local->cont.mkdir.params)
- dict_unref (local->cont.mkdir.params);
- }
+ { /* getxattr */
+ GF_FREE(local->cont.getxattr.name);
+ }
- { /* symlink */
- if (local->cont.symlink.params)
- dict_unref (local->cont.symlink.params);
- }
+ { /* lk */
+ GF_FREE(local->cont.lk.locked_nodes);
+ GF_FREE(local->cont.lk.dom_locked_nodes);
+ GF_FREE(local->cont.lk.dom_lock_op_ret);
+ GF_FREE(local->cont.lk.dom_lock_op_errno);
+ }
- { /* writev */
- GF_FREE (local->cont.writev.vector);
- if (local->cont.writev.iobref)
- iobref_unref (local->cont.writev.iobref);
- }
+ { /* create */
+ if (local->cont.create.fd)
+ fd_unref(local->cont.create.fd);
+ if (local->cont.create.params)
+ dict_unref(local->cont.create.params);
+ }
- { /* setxattr */
- if (local->cont.setxattr.dict)
- dict_unref (local->cont.setxattr.dict);
- }
+ { /* mknod */
+ if (local->cont.mknod.params)
+ dict_unref(local->cont.mknod.params);
+ }
- { /* fsetxattr */
- if (local->cont.fsetxattr.dict)
- dict_unref (local->cont.fsetxattr.dict);
- }
+ { /* mkdir */
+ if (local->cont.mkdir.params)
+ dict_unref(local->cont.mkdir.params);
+ }
- { /* removexattr */
- GF_FREE (local->cont.removexattr.name);
- }
- { /* xattrop */
- if (local->cont.xattrop.xattr)
- dict_unref (local->cont.xattrop.xattr);
- }
- { /* symlink */
- GF_FREE (local->cont.symlink.linkpath);
- }
+ { /* symlink */
+ if (local->cont.symlink.params)
+ dict_unref(local->cont.symlink.params);
+ }
- { /* opendir */
- GF_FREE (local->cont.opendir.checksum);
- }
+ { /* writev */
+ GF_FREE(local->cont.writev.vector);
+ if (local->cont.writev.iobref)
+ iobref_unref(local->cont.writev.iobref);
+ }
- { /* readdirp */
- if (local->cont.readdir.dict)
- dict_unref (local->cont.readdir.dict);
- }
+ { /* setxattr */
+ if (local->cont.setxattr.dict)
+ dict_unref(local->cont.setxattr.dict);
+ }
- { /* inodelk */
- GF_FREE (local->cont.inodelk.volume);
- }
+ { /* fsetxattr */
+ if (local->cont.fsetxattr.dict)
+ dict_unref(local->cont.fsetxattr.dict);
+ }
- if (local->xdata_req)
- dict_unref (local->xdata_req);
+ { /* removexattr */
+ GF_FREE(local->cont.removexattr.name);
+ }
+ { /* xattrop */
+ if (local->cont.xattrop.xattr)
+ dict_unref(local->cont.xattrop.xattr);
+ }
+ { /* symlink */
+ GF_FREE(local->cont.symlink.linkpath);
+ }
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
+ { /* opendir */
+ GF_FREE(local->cont.opendir.checksum);
+ }
+
+ { /* open */
+ if (local->cont.open.fd)
+ fd_unref(local->cont.open.fd);
+ }
+
+ { /* readdirp */
+ if (local->cont.readdir.dict)
+ dict_unref(local->cont.readdir.dict);
+ }
+
+ { /* inodelk */
+ GF_FREE(local->cont.inodelk.volume);
+ if (local->cont.inodelk.xdata)
+ dict_unref(local->cont.inodelk.xdata);
+ }
+
+ { /* entrylk */
+ GF_FREE(local->cont.entrylk.volume);
+ GF_FREE(local->cont.entrylk.basename);
+ if (local->cont.entrylk.xdata)
+ dict_unref(local->cont.entrylk.xdata);
+ }
+
+ if (local->xdata_req)
+ dict_unref(local->xdata_req);
+
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
}
-
int
-afr_frame_return (call_frame_t *frame)
+afr_frame_return(call_frame_t *frame)
{
- afr_local_t *local = NULL;
- int call_count = 0;
+ afr_local_t *local = NULL;
+ int call_count = 0;
- local = frame->local;
+ local = frame->local;
- LOCK (&frame->lock);
- {
- call_count = --local->call_count;
- }
- UNLOCK (&frame->lock);
+ LOCK(&frame->lock);
+ {
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
- return call_count;
+ return call_count;
}
-static char *afr_ignore_xattrs[] = {
- GLUSTERFS_OPEN_FD_COUNT,
- GLUSTERFS_PARENT_ENTRYLK,
- GLUSTERFS_ENTRYLK_COUNT,
- GLUSTERFS_INODELK_COUNT,
- GF_SELINUX_XATTR_KEY,
- QUOTA_SIZE_KEY,
- NULL
-};
+static char *afr_ignore_xattrs[] = {GF_SELINUX_XATTR_KEY, QUOTA_SIZE_KEY, NULL};
gf_boolean_t
-afr_is_xattr_ignorable (char *key)
+afr_is_xattr_ignorable(char *key)
{
- int i = 0;
+ int i = 0;
- if (!strncmp (key, AFR_XATTR_PREFIX, strlen(AFR_XATTR_PREFIX)))
- return _gf_true;
- for (i = 0; afr_ignore_xattrs[i]; i++) {
- if (!strcmp (key, afr_ignore_xattrs[i]))
- return _gf_true;
- }
- return _gf_false;
+ if (!strncmp(key, AFR_XATTR_PREFIX, SLEN(AFR_XATTR_PREFIX)))
+ return _gf_true;
+ for (i = 0; afr_ignore_xattrs[i]; i++) {
+ if (!strcmp(key, afr_ignore_xattrs[i]))
+ return _gf_true;
+ }
+ return _gf_false;
}
static gf_boolean_t
-afr_xattr_match (dict_t *this, char *key1, data_t *value1, void *data)
+afr_xattr_match_needed(dict_t *this, char *key1, data_t *value1, void *data)
{
- if (!afr_is_xattr_ignorable (key1))
- return _gf_true;
-
+ /* Ignore all non-disk (i.e. virtual) xattrs right away. */
+ if (!gf_is_valid_xattr_namespace(key1))
return _gf_false;
+
+ /* Ignore on-disk xattrs that AFR doesn't need to heal. */
+ if (!afr_is_xattr_ignorable(key1))
+ return _gf_true;
+
+ return _gf_false;
}
gf_boolean_t
-afr_xattrs_are_equal (dict_t *dict1, dict_t *dict2)
+afr_xattrs_are_equal(dict_t *dict1, dict_t *dict2)
{
- return are_dicts_equal (dict1, dict2, afr_xattr_match, NULL);
+ return are_dicts_equal(dict1, dict2, afr_xattr_match_needed, NULL);
}
static int
-afr_get_parent_read_subvol (xlator_t *this, inode_t *parent,
- struct afr_reply *replies, unsigned char *readable)
+afr_get_parent_read_subvol(xlator_t *this, inode_t *parent,
+ struct afr_reply *replies, unsigned char *readable)
{
- int i = 0;
- int par_read_subvol = -1;
- int par_read_subvol_iter = -1;
- afr_private_t *priv = NULL;
-
- priv = this->private;
-
- if (parent)
- par_read_subvol = afr_data_subvol_get (parent, this, 0, 0,
- NULL);
+ int i = 0;
+ int par_read_subvol = -1;
+ int par_read_subvol_iter = -1;
+ afr_private_t *priv = NULL;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
+ priv = this->private;
- if (replies[i].op_ret < 0)
- continue;
+ if (parent)
+ par_read_subvol = afr_data_subvol_get(parent, this, NULL, NULL, NULL,
+ NULL);
- if (par_read_subvol_iter == -1) {
- par_read_subvol_iter = i;
- continue;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
- if ((par_read_subvol_iter != par_read_subvol) && readable[i])
- par_read_subvol_iter = i;
+ if (replies[i].op_ret < 0)
+ continue;
- if (i == par_read_subvol)
- par_read_subvol_iter = i;
+ if (par_read_subvol_iter == -1) {
+ par_read_subvol_iter = i;
+ continue;
}
- /* At the end of the for-loop, the only reason why @par_read_subvol_iter
- * could be -1 is when this LOOKUP has failed on all sub-volumes.
- * So it is okay to send an arbitrary subvolume (0 in this case)
- * as parent read subvol.
- */
- if (par_read_subvol_iter == -1)
- par_read_subvol_iter = 0;
- return par_read_subvol_iter;
+ if ((par_read_subvol_iter != par_read_subvol) && readable[i])
+ par_read_subvol_iter = i;
+ if (i == par_read_subvol)
+ par_read_subvol_iter = i;
+ }
+ /* At the end of the for-loop, the only reason why @par_read_subvol_iter
+ * could be -1 is when this LOOKUP has failed on all sub-volumes.
+ * So it is okay to send an arbitrary subvolume (0 in this case)
+ * as parent read subvol.
+ */
+ if (par_read_subvol_iter == -1)
+ par_read_subvol_iter = 0;
+
+ return par_read_subvol_iter;
}
int
-afr_read_subvol_decide (inode_t *inode, xlator_t *this,
- afr_read_subvol_args_t *args)
+afr_read_subvol_decide(inode_t *inode, xlator_t *this,
+ afr_read_subvol_args_t *args, unsigned char *readable)
{
- int data_subvol = -1;
- int mdata_subvol = -1;
+ int event = 0;
+ afr_private_t *priv = NULL;
+ unsigned char *intersection = NULL;
+
+ priv = this->private;
+ intersection = alloca0(priv->child_count);
+
+ afr_readables_intersect_get(inode, this, &event, intersection);
+
+ if (AFR_COUNT(intersection, priv->child_count) <= 0) {
+ /* TODO: If we have one brick with valid data_readable and
+ * another with metadata_readable, try to send an iatt with
+ * valid bits from both.*/
+ return -1;
+ }
- data_subvol = afr_data_subvol_get (inode, this,
- 0, 0, args);
- mdata_subvol = afr_metadata_subvol_get (inode, this,
- 0, 0, args);
- if (data_subvol == -1 || mdata_subvol == -1)
- return -1;
+ memcpy(readable, intersection, sizeof(*readable) * priv->child_count);
- return data_subvol;
+ return afr_read_subvol_select_by_policy(inode, this, intersection, args);
}
static inline int
-afr_first_up_child (call_frame_t *frame, xlator_t *this)
+afr_first_up_child(call_frame_t *frame, xlator_t *this)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++)
- if (local->replies[i].valid &&
- local->replies[i].op_ret == 0)
- return i;
- return 0;
+ for (i = 0; i < priv->child_count; i++)
+ if (local->replies[i].valid && local->replies[i].op_ret == 0)
+ return i;
+ return -1;
}
static void
-afr_lookup_done (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = -1;
- int op_errno = 0;
- int read_subvol = 0;
- int par_read_subvol = 0;
- unsigned char *readable = NULL;
- int event = 0;
- struct afr_reply *replies = NULL;
- uuid_t read_gfid = {0, };
- gf_boolean_t locked_entry = _gf_false;
- gf_boolean_t can_interpret = _gf_true;
- inode_t *parent = NULL;
- int spb_choice = -1;
- ia_type_t ia_type = IA_INVAL;
- afr_read_subvol_args_t args = {0,};
-
- priv = this->private;
- local = frame->local;
- replies = local->replies;
- parent = local->loc.parent;
-
- locked_entry = afr_is_possibly_under_txn (AFR_ENTRY_TRANSACTION, local,
- this);
-
- readable = alloca0 (priv->child_count);
-
- afr_inode_read_subvol_get (parent, this, readable, NULL, &event);
-
- afr_inode_split_brain_choice_get (local->inode, this,
- &spb_choice);
- /* First, check if we have a gfid-change from somewhere,
- If so, propagate that so that a fresh lookup can be
- issued
- */
- if (local->cont.lookup.needs_fresh_lookup) {
- local->op_ret = -1;
- local->op_errno = ESTALE;
- goto unwind;
- }
-
- op_errno = afr_final_errno (frame->local, this->private);
- local->op_errno = op_errno;
-
- read_subvol = -1;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
-
- if (locked_entry && replies[i].op_ret == -1 &&
- replies[i].op_errno == ENOENT) {
- /* Second, check entry is still
- "underway" in creation */
- local->op_ret = -1;
- local->op_errno = ENOENT;
- goto unwind;
- }
-
- if (replies[i].op_ret == -1)
- continue;
-
- if (read_subvol == -1 || !readable[read_subvol]) {
- read_subvol = i;
- gf_uuid_copy (read_gfid, replies[i].poststat.ia_gfid);
- ia_type = replies[i].poststat.ia_type;
- local->op_ret = 0;
- }
- }
-
- if (read_subvol == -1)
- goto unwind;
- /* We now have a read_subvol, which is readable[] (if there
- were any). Next we look for GFID mismatches. We don't
- consider a GFID mismatch as an error if read_subvol is
- readable[] but the mismatching GFID subvol is not.
- */
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid || replies[i].op_ret == -1) {
- if (priv->child_up[i])
- can_interpret = _gf_false;
- continue;
- }
-
- if (!gf_uuid_compare (replies[i].poststat.ia_gfid, read_gfid))
- continue;
-
- can_interpret = _gf_false;
-
- if (locked_entry)
- continue;
-
- /* Now GFIDs mismatch. It's OK as long as this subvol
- is not readable[] but read_subvol is */
- if (readable[read_subvol] && !readable[i])
- continue;
-
- /* LOG ERROR */
- local->op_ret = -1;
- local->op_errno = EIO;
- goto unwind;
- }
-
- /* Forth, for the finalized GFID, pick the best subvolume
- to return stats from.
- */
- if (can_interpret) {
- /* It is safe to call afr_replies_interpret() because we have
- a response from all the UP subvolumes and all of them resolved
- to the same GFID
- */
- gf_uuid_copy (args.gfid, read_gfid);
- args.ia_type = ia_type;
- if (afr_replies_interpret (frame, this, local->inode, NULL)) {
- read_subvol = afr_read_subvol_decide (local->inode,
- this, &args);
- afr_inode_read_subvol_reset (local->inode, this);
- goto cant_interpret;
- } else {
- read_subvol = afr_data_subvol_get (local->inode, this,
- 0, 0, &args);
- }
- } else {
- cant_interpret:
- if (read_subvol == -1) {
- if (spb_choice >= 0)
- read_subvol = spb_choice;
- else
- read_subvol = afr_first_up_child (frame, this);
- }
- dict_del (replies[read_subvol].xdata, GF_CONTENT_KEY);
- }
+afr_attempt_readsubvol_set(call_frame_t *frame, xlator_t *this,
+ unsigned char *success_replies,
+ unsigned char *data_readable, int *read_subvol)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int spb_subvol = -1;
+ int child_count = -1;
- afr_handle_quota_size (frame, this);
+ if (*read_subvol != -1)
+ return;
-unwind:
- afr_set_need_heal (this, local);
- if (read_subvol == -1) {
- if (spb_choice >= 0)
- read_subvol = spb_choice;
- else
- read_subvol = afr_first_up_child (frame, this);
+ priv = this->private;
+ local = frame->local;
+ child_count = priv->child_count;
+
+ afr_split_brain_read_subvol_get(local->inode, this, frame, &spb_subvol);
+ if ((spb_subvol >= 0) &&
+ (AFR_COUNT(success_replies, child_count) == child_count)) {
+ *read_subvol = spb_subvol;
+ } else if (!priv->quorum_count ||
+ frame->root->pid == GF_CLIENT_PID_GLFS_HEAL) {
+ *read_subvol = afr_first_up_child(frame, this);
+ } else if (priv->quorum_count &&
+ afr_has_quorum(data_readable, this, NULL)) {
+ /* read_subvol is guaranteed to be valid if we hit this path. */
+ *read_subvol = afr_first_up_child(frame, this);
+ } else {
+ /* If quorum is enabled and we still do not have a
+ readable subvolume, it means all good copies are down.
+ */
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_READ_SUBVOL_ERROR,
+ "no read "
+ "subvols for %s",
+ local->loc.path);
+ }
+ if (*read_subvol >= 0)
+ dict_del_sizen(local->replies[*read_subvol].xdata, GF_CONTENT_KEY);
+}
+static void
+afr_lookup_done(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = -1;
+ int op_errno = 0;
+ int read_subvol = 0;
+ int par_read_subvol = 0;
+ int ret = -1;
+ unsigned char *readable = NULL;
+ unsigned char *success_replies = NULL;
+ int event = 0;
+ struct afr_reply *replies = NULL;
+ uuid_t read_gfid = {
+ 0,
+ };
+ gf_boolean_t locked_entry = _gf_false;
+ gf_boolean_t in_flight_create = _gf_false;
+ gf_boolean_t can_interpret = _gf_true;
+ inode_t *parent = NULL;
+ ia_type_t ia_type = IA_INVAL;
+ afr_read_subvol_args_t args = {
+ 0,
+ };
+ char *gfid_heal_msg = NULL;
+
+ priv = this->private;
+ local = frame->local;
+ replies = local->replies;
+ parent = local->loc.parent;
+
+ locked_entry = afr_is_possibly_under_txn(AFR_ENTRY_TRANSACTION, local,
+ this);
+
+ readable = alloca0(priv->child_count);
+ success_replies = alloca0(priv->child_count);
+
+ afr_inode_read_subvol_get(parent, this, readable, NULL, &event);
+ par_read_subvol = afr_get_parent_read_subvol(this, parent, replies,
+ readable);
+
+ /* First, check if we have a gfid change from somewhere.
+ If so, propagate that so that a fresh lookup can be
+ issued.
+ */
+ if (local->cont.lookup.needs_fresh_lookup) {
+ local->op_ret = -1;
+ local->op_errno = ESTALE;
+ goto error;
+ }
+
+ op_errno = afr_final_errno(frame->local, this->private);
+ local->op_errno = op_errno;
+
+ read_subvol = -1;
+ afr_fill_success_replies(local, priv, success_replies);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+
+ if (replies[i].op_ret == -1) {
+ if (locked_entry && replies[i].op_errno == ENOENT) {
+ in_flight_create = _gf_true;
+ }
+ continue;
+ }
+
+ if (read_subvol == -1 || !readable[read_subvol]) {
+ read_subvol = i;
+ gf_uuid_copy(read_gfid, replies[i].poststat.ia_gfid);
+ ia_type = replies[i].poststat.ia_type;
+ local->op_ret = 0;
+ }
+ }
+
+ if (in_flight_create && !afr_has_quorum(success_replies, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = ENOENT;
+ goto error;
+ }
+
+ if (read_subvol == -1)
+ goto error;
+ /* We now have a read_subvol, which is in readable[] (if there
+ were any readable subvols). Next we look for GFID mismatches.
+ We don't consider a GFID mismatch an error if read_subvol is
+ in readable[] but the mismatching-GFID subvol is not.
+ */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1) {
+ continue;
+ }
+
+ if (!gf_uuid_compare(replies[i].poststat.ia_gfid, read_gfid))
+ continue;
+
+ can_interpret = _gf_false;
+
+ if (locked_entry)
+ continue;
+
+ /* Now the GFIDs mismatch. That is OK as long as this subvol
+ is not in readable[] but read_subvol is. */
+ if (readable[read_subvol] && !readable[i])
+ continue;
+
+ /* If we were called from glfsheal and there is still a gfid
+ * mismatch, succeed the lookup and let glfsheal print the
+ * response via gfid-heal-msg.*/
+ if (!dict_get_str_sizen(local->xattr_req, "gfid-heal-msg",
+ &gfid_heal_msg))
+ goto cant_interpret;
+
+ /* LOG ERROR */
+ local->op_ret = -1;
+ local->op_errno = EIO;
+ goto error;
+ }
+
+ /* Fourth, for the finalized GFID, pick the best subvolume
+ to return stats from.
+ */
+ read_subvol = -1;
+ memset(readable, 0, sizeof(*readable) * priv->child_count);
+ if (can_interpret) {
+ if (!afr_has_quorum(success_replies, this, NULL))
+ goto cant_interpret;
+ /* It is safe to call afr_replies_interpret() because we have
+ a response from all the UP subvolumes and all of them resolved
+ to the same GFID
+ */
+ gf_uuid_copy(args.gfid, read_gfid);
+ args.ia_type = ia_type;
+ ret = afr_replies_interpret(frame, this, local->inode, NULL);
+ read_subvol = afr_read_subvol_decide(local->inode, this, &args,
+ readable);
+ if (read_subvol == -1)
+ goto cant_interpret;
+ if (ret) {
+ afr_inode_need_refresh_set(local->inode, this);
+ dict_del_sizen(local->replies[read_subvol].xdata, GF_CONTENT_KEY);
}
- par_read_subvol = afr_get_parent_read_subvol (this, parent, replies,
- readable);
- if (AFR_IS_ARBITER_BRICK (priv, read_subvol) && local->op_ret == 0) {
- local->op_ret = -1;
- local->op_errno = ENOTCONN;
+ } else {
+ cant_interpret:
+ afr_attempt_readsubvol_set(frame, this, success_replies, readable,
+ &read_subvol);
+ if (read_subvol == -1) {
+ goto error;
}
+ }
+
+ afr_handle_quota_size(frame, this);
- AFR_STACK_UNWIND (lookup, frame, local->op_ret, local->op_errno,
- local->inode, &local->replies[read_subvol].poststat,
- local->replies[read_subvol].xdata,
- &local->replies[par_read_subvol].postparent);
+ afr_set_need_heal(this, local);
+ if (AFR_IS_ARBITER_BRICK(priv, read_subvol) && local->op_ret == 0) {
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ gf_msg_debug(this->name, 0,
+ "Arbiter cannot be a read subvol "
+ "for %s",
+ local->loc.path);
+ goto error;
+ }
+
+ ret = dict_get_str_sizen(local->xattr_req, "gfid-heal-msg", &gfid_heal_msg);
+ if (!ret) {
+ ret = dict_set_str_sizen(local->replies[read_subvol].xdata,
+ "gfid-heal-msg", gfid_heal_msg);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_DICT_SET_FAILED,
+ "Error setting gfid-heal-msg dict");
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ }
+ }
+
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno,
+ local->inode, &local->replies[read_subvol].poststat,
+ local->replies[read_subvol].xdata,
+ &local->replies[par_read_subvol].postparent);
+ return;
+
+error:
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno, NULL, NULL,
+ NULL, NULL);
}
/*
@@ -1815,654 +3122,845 @@ unwind:
* others in that they must be given higher priority while
* returning to the user.
*
- * The hierarchy is ENODATA > ENOENT > ESTALE > others
+ * The hierarchy is ENODATA > ENOENT > ESTALE > ENOSPC > others
*/
int
-afr_higher_errno (int32_t old_errno, int32_t new_errno)
+afr_higher_errno(int32_t old_errno, int32_t new_errno)
{
- if (old_errno == ENODATA || new_errno == ENODATA)
- return ENODATA;
- if (old_errno == ENOENT || new_errno == ENOENT)
- return ENOENT;
- if (old_errno == ESTALE || new_errno == ESTALE)
- return ESTALE;
+ if (old_errno == ENODATA || new_errno == ENODATA)
+ return ENODATA;
+ if (old_errno == ENOENT || new_errno == ENOENT)
+ return ENOENT;
+ if (old_errno == ESTALE || new_errno == ESTALE)
+ return ESTALE;
+ if (old_errno == ENOSPC || new_errno == ENOSPC)
+ return ENOSPC;
- return new_errno;
+ return new_errno;
}
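afr_higher_errno() folds the per-brick failures pairwise so the most meaningful error is the one returned to the client, following the ENODATA > ENOENT > ESTALE > ENOSPC > others order above. A standalone sketch of that fold over a made-up set of brick replies:

```
#include <errno.h>
#include <stdio.h>

/* Same pairwise fold as afr_higher_errno(): the more "interesting"
 * error wins, so one brick replying ENOENT outranks others replying,
 * say, ENOTCONN. */
static int
higher_errno(int old_errno, int new_errno)
{
    if (old_errno == ENODATA || new_errno == ENODATA)
        return ENODATA;
    if (old_errno == ENOENT || new_errno == ENOENT)
        return ENOENT;
    if (old_errno == ESTALE || new_errno == ESTALE)
        return ESTALE;
    if (old_errno == ENOSPC || new_errno == ENOSPC)
        return ENOSPC;
    return new_errno;
}

int
main(void)
{
    int replies[] = {ENOTCONN, ESTALE, ENOENT}; /* made-up per-brick errors */
    int op_errno = 0;

    for (int i = 0; i < 3; i++)
        op_errno = higher_errno(op_errno, replies[i]);

    /* ENOENT wins because it sits highest in the hierarchy here */
    printf("final errno: %d (ENOENT=%d)\n", op_errno, ENOENT);
    return 0;
}
```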
-
int
-afr_final_errno (afr_local_t *local, afr_private_t *priv)
+afr_final_errno(afr_local_t *local, afr_private_t *priv)
{
- int i = 0;
- int op_errno = 0;
- int tmp_errno = 0;
+ int i = 0;
+ int op_errno = 0;
+ int tmp_errno = 0;
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret >= 0)
- continue;
- tmp_errno = local->replies[i].op_errno;
- op_errno = afr_higher_errno (op_errno, tmp_errno);
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret >= 0)
+ continue;
+ tmp_errno = local->replies[i].op_errno;
+ op_errno = afr_higher_errno(op_errno, tmp_errno);
+ }
- return op_errno;
+ return op_errno;
}
static int32_t
-afr_local_discovery_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict,
- dict_t *xdata)
-{
- int ret = 0;
- char *pathinfo = NULL;
- gf_boolean_t is_local = _gf_false;
- afr_private_t *priv = NULL;
- int32_t child_index = -1;
-
- if (op_ret != 0) {
- goto out;
- }
-
- priv = this->private;
- child_index = (int32_t)(long)cookie;
-
- ret = dict_get_str (dict, GF_XATTR_PATHINFO_KEY, &pathinfo);
- if (ret != 0) {
- goto out;
- }
-
- ret = glusterfs_is_local_pathinfo (pathinfo, &is_local);
- if (ret) {
- goto out;
- }
-
- /*
- * Note that one local subvolume will override another here. The only
- * way to avoid that would be to retain extra information about whether
- * the previous read_child is local, and it's just not worth it. Even
- * the slowest local subvolume is far preferable to a remote one.
- */
- if (is_local) {
- priv->local[child_index] = 1;
- /* Don't set arbiter as read child. */
- if (AFR_IS_ARBITER_BRICK(priv, child_index))
- goto out;
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_LOCAL_CHILD, "selecting local read_child %s",
- priv->children[child_index]->name);
-
- priv->read_child = child_index;
- }
+afr_local_discovery_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
+{
+ int ret = 0;
+ char *pathinfo = NULL;
+ gf_boolean_t is_local = _gf_false;
+ afr_private_t *priv = NULL;
+ int32_t child_index = -1;
+
+ if (op_ret != 0) {
+ goto out;
+ }
+
+ priv = this->private;
+ child_index = (int32_t)(long)cookie;
+
+ ret = dict_get_str_sizen(dict, GF_XATTR_PATHINFO_KEY, &pathinfo);
+ if (ret != 0) {
+ goto out;
+ }
+
+ ret = glusterfs_is_local_pathinfo(pathinfo, &is_local);
+ if (ret) {
+ goto out;
+ }
+
+ /*
+ * Note that one local subvolume will override another here. The only
+ * way to avoid that would be to retain extra information about whether
+ * the previous read_child is local, and it's just not worth it. Even
+ * the slowest local subvolume is far preferable to a remote one.
+ */
+ if (is_local) {
+ priv->local[child_index] = 1;
+ /* Don't set arbiter as read child. */
+ if (AFR_IS_ARBITER_BRICK(priv, child_index))
+ goto out;
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_LOCAL_CHILD,
+ "selecting local read_child %s",
+ priv->children[child_index]->name);
+
+ priv->read_child = child_index;
+ }
out:
- STACK_DESTROY(frame->root);
- return 0;
+ STACK_DESTROY(frame->root);
+ return 0;
}
static void
-afr_attempt_local_discovery (xlator_t *this, int32_t child_index)
+afr_attempt_local_discovery(xlator_t *this, int32_t child_index)
{
- call_frame_t *newframe = NULL;
- loc_t tmploc = {0,};
- afr_private_t *priv = this->private;
+ call_frame_t *newframe = NULL;
+ loc_t tmploc = {
+ 0,
+ };
+ afr_private_t *priv = this->private;
- newframe = create_frame(this,this->ctx->pool);
- if (!newframe) {
- return;
- }
+ newframe = create_frame(this, this->ctx->pool);
+ if (!newframe) {
+ return;
+ }
- tmploc.gfid[sizeof(tmploc.gfid)-1] = 1;
- STACK_WIND_COOKIE (newframe, afr_local_discovery_cbk,
- (void *)(long)child_index,
- priv->children[child_index],
- priv->children[child_index]->fops->getxattr,
- &tmploc, GF_XATTR_PATHINFO_KEY, NULL);
+ tmploc.gfid[sizeof(tmploc.gfid) - 1] = 1;
+ STACK_WIND_COOKIE(newframe, afr_local_discovery_cbk,
+ (void *)(long)child_index, priv->children[child_index],
+ priv->children[child_index]->fops->getxattr, &tmploc,
+ GF_XATTR_PATHINFO_KEY, NULL);
}
int
-afr_lookup_sh_metadata_wrap (void *opaque)
-{
- call_frame_t *frame = opaque;
- afr_local_t *local = NULL;
- xlator_t *this = NULL;
- inode_t *inode = NULL;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- int i= 0, first = -1;
- int ret = -1;
- dict_t *dict = NULL;
+afr_lookup_sh_metadata_wrap(void *opaque)
+{
+ call_frame_t *frame = opaque;
+ afr_local_t *local = NULL;
+ xlator_t *this = NULL;
+ inode_t *inode = NULL;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ int i = 0, first = -1;
+ int ret = -1;
+ dict_t *dict = NULL;
+
+ local = frame->local;
+ this = frame->this;
+ priv = this->private;
+ replies = local->replies;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ first = i;
+ break;
+ }
+ if (first == -1)
+ goto out;
+
+ if (afr_selfheal_metadata_by_stbuf(this, &replies[first].poststat))
+ goto out;
+
+ afr_local_replies_wipe(local, this->private);
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ if (local->xattr_req) {
+ dict_copy(local->xattr_req, dict);
+ }
+
+ ret = dict_set_sizen_str_sizen(dict, "link-count", GF_XATTROP_INDEX_COUNT);
+ if (ret) {
+ gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
+ }
+
+ if (loc_is_nameless(&local->loc)) {
+ ret = afr_selfheal_unlocked_discover_on(frame, local->inode,
+ local->loc.gfid, local->replies,
+ local->child_up, dict);
+ } else {
+ inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
+ local->loc.name, local->replies,
+ local->child_up, dict);
+ }
+ if (inode)
+ inode_unref(inode);
+out:
+ if (loc_is_nameless(&local->loc))
+ afr_discover_done(frame, this);
+ else
+ afr_lookup_done(frame, this);
- local = frame->local;
- this = frame->this;
- priv = this->private;
- replies = local->replies;
-
- for (i =0; i < priv->child_count; i++) {
- if(!replies[i].valid || replies[i].op_ret == -1)
- continue;
- first = i;
- break;
- }
- if (first == -1)
- goto out;
+ if (dict)
+ dict_unref(dict);
- inode = inode_link (local->inode, NULL, NULL, &replies[first].poststat);
- if(!inode)
- goto out;
+ return 0;
+}
- afr_selfheal_metadata (frame, this, inode);
- inode_unref (inode);
+gf_boolean_t
+afr_is_pending_set(xlator_t *this, dict_t *xdata, int type)
+{
+ int idx = -1;
+ afr_private_t *priv = NULL;
+ void *pending_raw = NULL;
+ int *pending_int = NULL;
+ int i = 0;
- afr_local_replies_wipe (local, this->private);
+ priv = this->private;
+ idx = afr_index_for_transaction_type(type);
- dict = dict_new ();
- if (!dict)
- goto out;
- ret = dict_set_str (dict, "link-count", GF_XATTROP_INDEX_COUNT);
- if (ret) {
- gf_msg_debug (this->name, -ret,
- "Unable to set link-count in dict ");
+ if (dict_get_ptr(xdata, AFR_DIRTY, &pending_raw) == 0) {
+ if (pending_raw) {
+ pending_int = pending_raw;
+
+ if (ntoh32(pending_int[idx]))
+ return _gf_true;
}
+ }
- inode = afr_selfheal_unlocked_lookup_on (frame, local->loc.parent,
- local->loc.name, local->replies,
- local->child_up, dict);
- if (inode)
- inode_unref (inode);
-out:
- afr_lookup_done (frame, this);
+ for (i = 0; i < priv->child_count; i++) {
+ if (dict_get_ptr(xdata, priv->pending_key[i], &pending_raw))
+ continue;
+ if (!pending_raw)
+ continue;
+ pending_int = pending_raw;
- if (dict)
- dict_unref (dict);
+ if (ntoh32(pending_int[idx]))
+ return _gf_true;
+ }
- return 0;
+ return _gf_false;
}
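
afr_is_pending_set() above walks the AFR_DIRTY and per-child pending xattrs carried in xdata and reports whether the counter for the given transaction type is non-zero; the counters travel in network byte order, hence the ntoh32() before the test. The standalone sketch below shows only that decode-and-check step; the blob layout, enum and helper names are assumptions for illustration, not AFR's on-wire format.

```
/* Hypothetical sketch: a pending-changelog blob modelled as an array
 * of 32-bit big-endian counters, one slot per transaction type.
 * "Is anything pending for this type?" is a byte-order conversion
 * plus a non-zero test on the right slot. */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum txn_type { TXN_DATA = 0, TXN_METADATA = 1, TXN_ENTRY = 2, TXN_MAX };

static bool
is_pending_set(const void *blob, enum txn_type type)
{
    uint32_t counters[TXN_MAX];

    memcpy(counters, blob, sizeof(counters)); /* avoid alignment issues */
    return ntohl(counters[type]) != 0;
}

int
main(void)
{
    uint32_t blob[TXN_MAX] = {0, htonl(2), 0}; /* metadata changes pending */

    printf("data pending: %d\n", is_pending_set(blob, TXN_DATA));
    printf("metadata pending: %d\n", is_pending_set(blob, TXN_METADATA));
    return 0;
}
```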
static gf_boolean_t
afr_can_start_metadata_self_heal(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- int i = 0, first = -1;
- gf_boolean_t start = _gf_false;
- struct iatt stbuf = {0, };
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ int i = 0, first = -1;
+ gf_boolean_t start = _gf_false;
+ struct iatt stbuf = {
+ 0,
+ };
- local = frame->local;
- replies = local->replies;
- priv = this->private;
+ local = frame->local;
+ replies = local->replies;
+ priv = this->private;
- if (!priv->metadata_self_heal)
- return _gf_false;
+ if (!priv->metadata_self_heal)
+ return _gf_false;
- for (i = 0; i < priv->child_count; i++) {
- if(!replies[i].valid || replies[i].op_ret == -1)
- continue;
- if (first == -1) {
- first = i;
- stbuf = replies[i].poststat;
- continue;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ if (first == -1) {
+ first = i;
+ stbuf = replies[i].poststat;
+ continue;
+ }
- if (gf_uuid_compare (stbuf.ia_gfid, replies[i].poststat.ia_gfid)) {
- start = _gf_false;
- break;
- }
- if (!IA_EQUAL (stbuf, replies[i].poststat, type)) {
- start = _gf_false;
- break;
- }
+ if (afr_is_pending_set(this, replies[i].xdata,
+ AFR_METADATA_TRANSACTION)) {
+ /* Let shd do the heal so that lookup is not blocked
+ * on getting metadata lock/doing the heal */
+ start = _gf_false;
+ break;
+ }
- /*Check if iattrs need heal*/
- if ((!IA_EQUAL (stbuf, replies[i].poststat, uid)) ||
- (!IA_EQUAL (stbuf, replies[i].poststat, gid)) ||
- (!IA_EQUAL (stbuf, replies[i].poststat, prot))) {
- start = _gf_true;
- continue;
- }
+ if (gf_uuid_compare(stbuf.ia_gfid, replies[i].poststat.ia_gfid)) {
+ start = _gf_false;
+ break;
+ }
+ if (!IA_EQUAL(stbuf, replies[i].poststat, type)) {
+ start = _gf_false;
+ break;
+ }
- /*Check if xattrs need heal*/
- if (!afr_xattrs_are_equal (replies[first].xdata,
- replies[i].xdata))
- start = _gf_true;
+ /*Check if iattrs need heal*/
+ if ((!IA_EQUAL(stbuf, replies[i].poststat, uid)) ||
+ (!IA_EQUAL(stbuf, replies[i].poststat, gid)) ||
+ (!IA_EQUAL(stbuf, replies[i].poststat, prot))) {
+ start = _gf_true;
+ continue;
}
- return start;
+ /*Check if xattrs need heal*/
+ if (!afr_xattrs_are_equal(replies[first].xdata, replies[i].xdata))
+ start = _gf_true;
+ }
+
+ return start;
}
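
The decision above bails out when the replies disagree on identity (gfid or type) or already carry pending metadata changelogs, and flags a heal only when ownership, mode or xattrs differ between otherwise matching replies. The toy comparison below illustrates just the iattr part of that classification; the struct and helper are hypothetical stand-ins for the poststat carried in an afr reply, not AFR types.

```
/* Hypothetical sketch of the "do iattrs need a metadata heal?" test:
 * identical uid/gid/mode means no heal, any mismatch means heal. */
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

struct reply_stat {
    uid_t uid;
    gid_t gid;
    mode_t prot; /* permission bits only */
};

static bool
iattrs_need_heal(const struct reply_stat *a, const struct reply_stat *b)
{
    return a->uid != b->uid || a->gid != b->gid || a->prot != b->prot;
}

int
main(void)
{
    struct reply_stat first = {1000, 1000, 0644};
    struct reply_stat other = {1000, 1000, 0600}; /* mode drifted on one brick */

    printf("metadata heal needed: %s\n",
           iattrs_need_heal(&first, &other) ? "yes" : "no");
    return 0;
}
```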
int
-afr_lookup_metadata_heal_check (call_frame_t *frame, xlator_t *this)
+afr_lookup_metadata_heal_check(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *heal = NULL;
- int ret = 0;
+ call_frame_t *heal = NULL;
+ afr_local_t *local = NULL;
+ int ret = 0;
- if (!afr_can_start_metadata_self_heal (frame, this))
- goto out;
+ local = frame->local;
+ if (!afr_can_start_metadata_self_heal(frame, this))
+ goto out;
- heal = copy_frame (frame);
- if (heal)
- heal->root->pid = GF_CLIENT_PID_SELF_HEALD;
- ret = synctask_new (this->ctx->env, afr_lookup_sh_metadata_wrap,
- afr_refresh_selfheal_done, heal, frame);
- if(ret)
- goto out;
- return ret;
+ heal = afr_frame_create(this, &ret);
+ if (!heal) {
+ ret = -ret;
+ goto out;
+ }
+
+ ret = synctask_new(this->ctx->env, afr_lookup_sh_metadata_wrap,
+ afr_refresh_selfheal_done, heal, frame);
+ if (ret)
+ goto out;
+ return ret;
out:
- afr_lookup_done (frame, this);
- return ret;
+ if (loc_is_nameless(&local->loc))
+ afr_discover_done(frame, this);
+ else
+ afr_lookup_done(frame, this);
+ if (heal)
+ AFR_STACK_DESTROY(heal);
+ return ret;
}
int
-afr_lookup_selfheal_wrap (void *opaque)
+afr_lookup_selfheal_wrap(void *opaque)
{
- int ret = 0;
- call_frame_t *frame = opaque;
- afr_local_t *local = NULL;
- xlator_t *this = NULL;
- inode_t *inode = NULL;
+ int ret = 0;
+ call_frame_t *frame = opaque;
+ afr_local_t *local = NULL;
+ xlator_t *this = NULL;
+ inode_t *inode = NULL;
+ uuid_t pargfid = {
+ 0,
+ };
- local = frame->local;
- this = frame->this;
+ local = frame->local;
+ this = frame->this;
+ loc_pargfid(&local->loc, pargfid);
- ret = afr_selfheal_name (frame->this, local->loc.pargfid,
- local->loc.name, &local->cont.lookup.gfid_req);
- if (ret == -EIO)
- goto unwind;
+ ret = afr_selfheal_name(frame->this, pargfid, local->loc.name,
+ &local->cont.lookup.gfid_req, local->xattr_req);
+ if (ret == -EIO)
+ goto unwind;
- afr_local_replies_wipe (local, this->private);
+ afr_local_replies_wipe(local, this->private);
- inode = afr_selfheal_unlocked_lookup_on (frame, local->loc.parent,
- local->loc.name, local->replies,
- local->child_up, NULL);
- if (inode)
- inode_unref (inode);
+ inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
+ local->loc.name, local->replies,
+ local->child_up, local->xattr_req);
+ if (inode)
+ inode_unref(inode);
- afr_lookup_metadata_heal_check(frame, this);
- return 0;
+ afr_lookup_metadata_heal_check(frame, this);
+ return 0;
unwind:
- AFR_STACK_UNWIND (lookup, frame, -1, EIO, NULL, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, EIO, NULL, NULL, NULL, NULL);
+ return 0;
}
-
int
-afr_lookup_entry_heal (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- call_frame_t *heal = NULL;
- int i = 0, first = -1;
- gf_boolean_t need_heal = _gf_false;
- struct afr_reply *replies = NULL;
- int ret = 0;
-
- local = frame->local;
- replies = local->replies;
- priv = this->private;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
-
- if ((replies[i].op_ret == -1) &&
- (replies[i].op_errno == ENODATA))
- need_heal = _gf_true;
-
- if (first == -1) {
- first = i;
- continue;
- }
-
- if (replies[i].op_ret != replies[first].op_ret) {
- need_heal = _gf_true;
- break;
- }
-
- if (gf_uuid_compare (replies[i].poststat.ia_gfid,
- replies[first].poststat.ia_gfid)) {
- need_heal = _gf_true;
- break;
- }
- }
-
- if (need_heal) {
- heal = copy_frame (frame);
- if (heal)
- heal->root->pid = GF_CLIENT_PID_SELF_HEALD;
- ret = synctask_new (this->ctx->env, afr_lookup_selfheal_wrap,
- afr_refresh_selfheal_done, heal, frame);
- if (ret)
- goto metadata_heal;
- return ret;
- }
-metadata_heal:
- ret = afr_lookup_metadata_heal_check (frame, this);
-
- return ret;
-}
+afr_lookup_entry_heal(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ call_frame_t *heal = NULL;
+ int i = 0, first = -1;
+ gf_boolean_t name_state_mismatch = _gf_false;
+ struct afr_reply *replies = NULL;
+ int ret = 0;
+ unsigned char *par_readables = NULL;
+ unsigned char *success = NULL;
+ int32_t op_errno = 0;
+ uuid_t gfid = {0};
+
+ local = frame->local;
+ replies = local->replies;
+ priv = this->private;
+ par_readables = alloca0(priv->child_count);
+ success = alloca0(priv->child_count);
+
+ ret = afr_inode_read_subvol_get(local->loc.parent, this, par_readables,
+ NULL, NULL);
+ if (ret < 0 || AFR_COUNT(par_readables, priv->child_count) == 0) {
+ /* In this case set par_readables to all 1 so that name_heal
+ * need checks at the end of this function will flag missing
+ * entry when name state mismatches*/
+ memset(par_readables, 1, priv->child_count);
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+
+ if (replies[i].op_ret == 0) {
+ if (gf_uuid_is_null(gfid)) {
+ gf_uuid_copy(gfid, replies[i].poststat.ia_gfid);
+ }
+ success[i] = 1;
+ } else {
+ if ((replies[i].op_errno != ENOTCONN) &&
+ (replies[i].op_errno != ENOENT) &&
+ (replies[i].op_errno != ESTALE)) {
+ op_errno = replies[i].op_errno;
+ }
+ }
+ /*gfid is missing, needs heal*/
+ if ((replies[i].op_ret == -1) && (replies[i].op_errno == ENODATA)) {
+ goto name_heal;
+ }
-int
-afr_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, inode_t *inode, struct iatt *buf,
- dict_t *xdata, struct iatt *postparent)
-{
- afr_local_t * local = NULL;
- int call_count = -1;
- int child_index = -1;
- GF_UNUSED int ret = 0;
- int8_t need_heal = 1;
-
- child_index = (long) cookie;
-
- local = frame->local;
-
- local->replies[child_index].valid = 1;
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
- /*
- * On revalidate lookup if the gfid-changed, afr should unwind the fop
- * with ESTALE so that a fresh lookup will be sent by the top xlator.
- * So remember it.
- */
- if (xdata && dict_get (xdata, "gfid-changed"))
- local->cont.lookup.needs_fresh_lookup = _gf_true;
+ if (first == -1) {
+ first = i;
+ continue;
+ }
- if (xdata) {
- ret = dict_get_int8 (xdata, "link-count", &need_heal);
- local->replies[child_index].need_heal = need_heal;
- } else {
- local->replies[child_index].need_heal = need_heal;
+ if (replies[i].op_ret != replies[first].op_ret) {
+ name_state_mismatch = _gf_true;
}
- if (op_ret != -1) {
- local->replies[child_index].poststat = *buf;
- local->replies[child_index].postparent = *postparent;
- if (xdata)
- local->replies[child_index].xdata = dict_ref (xdata);
- }
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- afr_set_need_heal (this, local);
- afr_lookup_entry_heal (frame, this);
+ if (replies[i].op_ret == 0) {
+ /* Rename after this lookup may succeed if we don't do
+ * a name-heal and the destination may not have pending xattrs
+ * to indicate which name is good and which is bad so always do
+ * this heal*/
+ if (gf_uuid_compare(replies[i].poststat.ia_gfid, gfid)) {
+ goto name_heal;
+ }
}
+ }
+
+ if (name_state_mismatch) {
+ if (!priv->quorum_count)
+ goto name_heal;
+ if (!afr_has_quorum(success, this, NULL))
+ goto name_heal;
+ if (op_errno)
+ goto name_heal;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+ if (par_readables[i] && replies[i].op_ret < 0 &&
+ replies[i].op_errno != ENOTCONN) {
+ goto name_heal;
+ }
+ }
+ }
+
+ goto metadata_heal;
+
+name_heal:
+ heal = afr_frame_create(this, NULL);
+ if (!heal)
+ goto metadata_heal;
+
+ ret = synctask_new(this->ctx->env, afr_lookup_selfheal_wrap,
+ afr_refresh_selfheal_done, heal, frame);
+ if (ret) {
+ AFR_STACK_DESTROY(heal);
+ goto metadata_heal;
+ }
+ return ret;
+
+metadata_heal:
+ ret = afr_lookup_metadata_heal_check(frame, this);
- return 0;
+ return ret;
}
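
afr_lookup_entry_heal() above escalates a name-state mismatch to a name heal only when that does not fight quorum: with quorum enabled, enough successful replies and a clean errno let the mismatch be tolerated unless a readable parent subvolume reported the entry missing. A minimal sketch of the "do the successes meet quorum?" step follows; the fixed majority rule and helper names are assumptions for illustration, not afr_has_quorum() itself.

```
/* Hypothetical majority-quorum check over per-child success flags.
 * AFR's real quorum handling is configurable; this sketch hard-codes
 * "more than half of the children". */
#include <stdbool.h>
#include <stdio.h>

static unsigned
count_set(const unsigned char *flags, unsigned n)
{
    unsigned i, c = 0;

    for (i = 0; i < n; i++)
        if (flags[i])
            c++;
    return c;
}

static bool
has_majority_quorum(const unsigned char *success, unsigned child_count)
{
    return count_set(success, child_count) > child_count / 2;
}

int
main(void)
{
    unsigned char success[3] = {1, 1, 0}; /* lookup succeeded on 2 of 3 */

    printf("quorum met: %s\n",
           has_majority_quorum(success, 3) ? "yes" : "no");
    return 0;
}
```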
+int
+afr_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+               int op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
+               struct iatt *postparent)
+{
+    afr_local_t *local = NULL;
+    int call_count = -1;
+    int child_index = -1;
+    GF_UNUSED int ret = 0;
+    int8_t need_heal = 1;
+
+    child_index = (long)cookie;
+
+    local = frame->local;
+
+    local->replies[child_index].valid = 1;
+    local->replies[child_index].op_ret = op_ret;
+    local->replies[child_index].op_errno = op_errno;
+    /*
+     * On revalidate lookup if the gfid-changed, afr should unwind the fop
+     * with ESTALE so that a fresh lookup will be sent by the top xlator.
+     * So remember it.
+     */
+    if (xdata && dict_get_sizen(xdata, "gfid-changed"))
+        local->cont.lookup.needs_fresh_lookup = _gf_true;
+
+    if (xdata) {
+        ret = dict_get_int8(xdata, "link-count", &need_heal);
+        local->replies[child_index].need_heal = need_heal;
+    } else {
+        local->replies[child_index].need_heal = need_heal;
+    }
+    if (op_ret != -1) {
+        local->replies[child_index].poststat = *buf;
+        local->replies[child_index].postparent = *postparent;
+        if (xdata)
+            local->replies[child_index].xdata = dict_ref(xdata);
+    }
+    call_count = afr_frame_return(frame);
+    if (call_count == 0) {
+        afr_set_need_heal(this, local);
+        afr_lookup_entry_heal(frame, this);
+    }
+
+    return 0;
+}
static void
-afr_discover_done (call_frame_t *frame, xlator_t *this)
+afr_discover_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = -1;
- int op_errno = 0;
- int spb_choice = -1;
- int read_subvol = -1;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int read_subvol = -1;
+ int ret = 0;
+ unsigned char *data_readable = NULL;
+ unsigned char *success_replies = NULL;
- priv = this->private;
- local = frame->local;
+ priv = this->private;
+ local = frame->local;
+ data_readable = alloca0(priv->child_count);
+ success_replies = alloca0(priv->child_count);
- afr_inode_split_brain_choice_get (local->inode, this,
- &spb_choice);
+ afr_fill_success_replies(local, priv, success_replies);
+ if (AFR_COUNT(success_replies, priv->child_count) > 0)
+ local->op_ret = 0;
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret == 0)
- local->op_ret = 0;
- }
+ if (local->op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(frame->local, this->private);
+ goto error;
+ }
- op_errno = afr_final_errno (frame->local, this->private);
+ if (!afr_has_quorum(success_replies, this, frame))
+ goto unwind;
- if (local->op_ret < 0) {
- local->op_errno = op_errno;
- local->op_ret = -1;
- goto unwind;
- }
+ ret = afr_replies_interpret(frame, this, local->inode, NULL);
+ if (ret) {
+ afr_inode_need_refresh_set(local->inode, this);
+ }
- afr_replies_interpret (frame, this, local->inode, NULL);
+ read_subvol = afr_read_subvol_decide(local->inode, this, NULL,
+ data_readable);
- read_subvol = afr_read_subvol_decide (local->inode, this, NULL);
- if (read_subvol == -1) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_READ_SUBVOL_ERROR, "no read subvols for %s",
- local->loc.path);
+unwind:
+ afr_attempt_readsubvol_set(frame, this, success_replies, data_readable,
+ &read_subvol);
+ if (read_subvol == -1)
+ goto error;
- if (spb_choice >= 0) {
- read_subvol = spb_choice;
- } else {
- read_subvol = afr_first_up_child (frame, this);
- }
- }
+ if (AFR_IS_ARBITER_BRICK(priv, read_subvol) && local->op_ret == 0) {
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ gf_msg_debug(this->name, 0,
+ "Arbiter cannot be a read subvol "
+ "for %s",
+ local->loc.path);
+ }
-unwind:
- if (read_subvol == -1) {
- if (spb_choice >= 0)
- read_subvol = spb_choice;
- else
- read_subvol = afr_first_up_child (frame, this);
- }
- if (AFR_IS_ARBITER_BRICK (priv, read_subvol) && local->op_ret == 0) {
- local->op_ret = -1;
- local->op_errno = ENOTCONN;
- }
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno,
+ local->inode, &local->replies[read_subvol].poststat,
+ local->replies[read_subvol].xdata,
+ &local->replies[read_subvol].postparent);
+ return;
- AFR_STACK_UNWIND (lookup, frame, local->op_ret, local->op_errno,
- local->inode, &local->replies[read_subvol].poststat,
- local->replies[read_subvol].xdata,
- &local->replies[read_subvol].postparent);
+error:
+ AFR_STACK_UNWIND(lookup, frame, local->op_ret, local->op_errno, NULL, NULL,
+ NULL, NULL);
}
+static int
+afr_ta_id_file_check(void *opaque)
+{
+ afr_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ loc_t loc = {
+ 0,
+ };
+ struct iatt stbuf = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ uuid_t gfid = {
+ 0,
+ };
+ fd_t *fd = NULL;
+ int ret = 0;
+
+ this = opaque;
+ priv = this->private;
+
+ ret = afr_fill_ta_loc(this, &loc, _gf_false);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to populate thin-arbiter loc for: %s.", loc.name);
+ goto out;
+ }
+
+ ret = syncop_lookup(priv->children[THIN_ARBITER_BRICK_INDEX], &loc, &stbuf,
+ 0, 0, 0);
+ if (ret == 0) {
+ goto out;
+ } else if (ret == -ENOENT) {
+ fd = fd_create(loc.inode, getpid());
+ if (!fd)
+ goto out;
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ gf_uuid_generate(gfid);
+ ret = dict_set_gfuuid(dict, "gfid-req", gfid, true);
+ ret = syncop_create(priv->children[THIN_ARBITER_BRICK_INDEX], &loc,
+ O_RDWR, 0664, fd, &stbuf, dict, NULL);
+ }
-int
-afr_discover_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, inode_t *inode, struct iatt *buf,
- dict_t *xdata, struct iatt *postparent)
-{
- afr_local_t * local = NULL;
- int call_count = -1;
- int child_index = -1;
- GF_UNUSED int ret = 0;
- int8_t need_heal = 1;
-
- child_index = (long) cookie;
-
- local = frame->local;
-
- local->replies[child_index].valid = 1;
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
- if (op_ret != -1) {
- local->replies[child_index].poststat = *buf;
- local->replies[child_index].postparent = *postparent;
- if (xdata)
- local->replies[child_index].xdata = dict_ref (xdata);
- }
-
- if (local->do_discovery && (op_ret == 0))
- afr_attempt_local_discovery (this, child_index);
-
- if (xdata) {
- ret = dict_get_int8 (xdata, "link-count", &need_heal);
- local->replies[child_index].need_heal = need_heal;
- } else {
- local->replies[child_index].need_heal = need_heal;
- }
-
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- afr_set_need_heal (this, local);
- afr_discover_done (frame, this);
- }
+out:
+ if (ret == 0) {
+ gf_uuid_copy(priv->ta_gfid, stbuf.ia_gfid);
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to lookup/create thin-arbiter id file.");
+ }
+ if (dict)
+ dict_unref(dict);
+ if (fd)
+ fd_unref(fd);
+ loc_wipe(&loc);
- return 0;
+ return 0;
}
+static int
+afr_ta_id_file_check_cbk(int ret, call_frame_t *ta_frame, void *opaque)
+{
+ return 0;
+}
-int
-afr_discover_do (call_frame_t *frame, xlator_t *this, int err)
+static void
+afr_discover_done(call_frame_t *frame, xlator_t *this)
{
- int ret = 0;
- int i = 0;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = 0;
+ int ret = 0;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ priv = this->private;
+ if (!priv->thin_arbiter_count)
+ goto unwind;
+ if (!gf_uuid_is_null(priv->ta_gfid))
+ goto unwind;
- if (err) {
- local->op_errno = -err;
- ret = -1;
- goto out;
- }
+ ret = synctask_new(this->ctx->env, afr_ta_id_file_check,
+ afr_ta_id_file_check_cbk, NULL, this);
+ if (ret)
+ goto unwind;
+unwind:
+ afr_discover_unwind(frame, this);
+}
- call_count = local->call_count = AFR_COUNT (local->child_up,
- priv->child_count);
+int
+afr_discover_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
+ struct iatt *postparent)
+{
+ afr_local_t *local = NULL;
+ int call_count = -1;
+ int child_index = -1;
+ GF_UNUSED int ret = 0;
+ int8_t need_heal = 1;
+
+ child_index = (long)cookie;
+
+ local = frame->local;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (op_ret != -1) {
+ local->replies[child_index].poststat = *buf;
+ local->replies[child_index].postparent = *postparent;
+ if (xdata)
+ local->replies[child_index].xdata = dict_ref(xdata);
+ }
+
+ if (local->do_discovery && (op_ret == 0))
+ afr_attempt_local_discovery(this, child_index);
+
+ if (xdata) {
+ ret = dict_get_int8(xdata, "link-count", &need_heal);
+ local->replies[child_index].need_heal = need_heal;
+ } else {
+ local->replies[child_index].need_heal = need_heal;
+ }
+
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ afr_set_need_heal(this, local);
+ afr_lookup_metadata_heal_check(frame, this);
+ }
- ret = afr_lookup_xattr_req_prepare (local, this, local->xattr_req,
- &local->loc);
- if (ret) {
- local->op_errno = -ret;
- ret = -1;
- goto out;
- }
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_discover_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->lookup,
- &local->loc, local->xattr_req);
- if (!--call_count)
- break;
- }
+int
+afr_discover_do(call_frame_t *frame, xlator_t *this, int err)
+{
+ int ret = 0;
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (err) {
+ local->op_errno = err;
+ goto out;
+ }
+
+ call_count = local->call_count = AFR_COUNT(local->child_up,
+ priv->child_count);
+
+ ret = afr_lookup_xattr_req_prepare(local, this, local->xattr_req,
+ &local->loc);
+ if (ret) {
+ local->op_errno = -ret;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(
+ frame, afr_discover_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->lookup, &local->loc, local->xattr_req);
+ if (!--call_count)
+ break;
}
+ }
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
+ return 0;
}
-
int
-afr_discover (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+afr_discover(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
{
- int op_errno = ENOMEM;
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int event = 0;
+ int op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int event = 0;
- priv = this->private;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- if (!local->call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ if (!local->call_count) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
- if (__is_root_gfid (loc->inode->gfid)) {
- if (!this->itable)
- this->itable = loc->inode->table;
- if (!priv->root_inode)
- priv->root_inode = inode_ref (loc->inode);
+ if (__is_root_gfid(loc->inode->gfid)) {
+ if (!priv->root_inode)
+ priv->root_inode = inode_ref(loc->inode);
- if (priv->choose_local && !priv->did_discovery) {
- /* Logic to detect which subvolumes of AFR are
- local, in order to prefer them for reads
- */
- local->do_discovery = _gf_true;
- priv->did_discovery = _gf_true;
- }
- }
+ if (priv->choose_local && !priv->did_discovery) {
+ /* Logic to detect which subvolumes of AFR are
+ local, in order to prefer them for reads
+ */
+ local->do_discovery = _gf_true;
+ priv->did_discovery = _gf_true;
+ }
+ }
- local->op = GF_FOP_LOOKUP;
+ local->op = GF_FOP_LOOKUP;
- loc_copy (&local->loc, loc);
+ loc_copy(&local->loc, loc);
- local->inode = inode_ref (loc->inode);
+ local->inode = inode_ref(loc->inode);
- if (xattr_req)
- /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
- allocate one for us */
- local->xattr_req = dict_ref (xattr_req);
+ if (xattr_req) {
+ /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
+ allocate one for us */
+ local->xattr_req = dict_copy_with_ref(xattr_req, NULL);
+ if (!local->xattr_req) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+ }
- if (gf_uuid_is_null (loc->inode->gfid)) {
- afr_discover_do (frame, this, 0);
- return 0;
- }
+ if (gf_uuid_is_null(loc->inode->gfid)) {
+ afr_discover_do(frame, this, 0);
+ return 0;
+ }
- afr_read_subvol_get (loc->inode, this, NULL, &event,
- AFR_DATA_TRANSACTION, NULL);
+ afr_read_subvol_get(loc->inode, this, NULL, NULL, &event,
+ AFR_DATA_TRANSACTION, NULL);
- if (event != local->event_generation)
- afr_inode_refresh (frame, this, loc->inode, afr_discover_do);
- else
- afr_discover_do (frame, this, 0);
+ afr_discover_do(frame, this, 0);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ return 0;
}
-
int
-afr_lookup_do (call_frame_t *frame, xlator_t *this, int err)
-{
- int ret = 0;
- int i = 0;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = 0;
-
- local = frame->local;
- priv = this->private;
-
- if (err < 0) {
- local->op_errno = -err;
- ret = -1;
- goto out;
- }
-
- call_count = local->call_count = AFR_COUNT (local->child_up,
- priv->child_count);
-
- ret = afr_lookup_xattr_req_prepare (local, this, local->xattr_req,
- &local->loc);
- if (ret) {
- local->op_errno = -ret;
- ret = -1;
- goto out;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_lookup_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->lookup,
- &local->loc, local->xattr_req);
- if (!--call_count)
- break;
- }
+afr_lookup_do(call_frame_t *frame, xlator_t *this, int err)
+{
+ int ret = 0;
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (err < 0) {
+ local->op_errno = err;
+ goto out;
+ }
+
+ call_count = local->call_count = AFR_COUNT(local->child_up,
+ priv->child_count);
+
+ ret = afr_lookup_xattr_req_prepare(local, this, local->xattr_req,
+ &local->loc);
+ if (ret) {
+ local->op_errno = -ret;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(
+ frame, afr_lookup_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->lookup, &local->loc, local->xattr_req);
+ if (!--call_count)
+ break;
}
- return 0;
+ }
+ return 0;
out:
- AFR_STACK_UNWIND (lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
- return 0;
+ AFR_STACK_UNWIND(lookup, frame, -1, local->op_errno, 0, 0, 0, 0);
+ return 0;
}
/*
@@ -2502,2622 +4000,3879 @@ out:
*/
int
-afr_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
-{
- afr_local_t *local = NULL;
- int32_t op_errno = 0;
- int event = 0;
- void *gfid_req = NULL;
- int ret = 0;
-
- if (!loc->parent && gf_uuid_is_null (loc->pargfid)) {
- if (xattr_req)
- dict_del (xattr_req, "gfid-req");
- afr_discover (frame, this, loc, xattr_req);
- return 0;
- }
-
- if (__is_root_gfid (loc->parent->gfid)) {
- if (!strcmp (loc->name, GF_REPLICATE_TRASH_DIR)) {
- op_errno = EPERM;
- goto out;
- }
- }
-
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
-
- if (!local->call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- local->op = GF_FOP_LOOKUP;
-
- loc_copy (&local->loc, loc);
-
- local->inode = inode_ref (loc->inode);
-
- if (xattr_req) {
- /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
- allocate one for us */
- local->xattr_req = dict_copy_with_ref (xattr_req, NULL);
- if (!local->xattr_req) {
- op_errno = ENOMEM;
- goto out;
- }
- ret = dict_get_ptr (local->xattr_req, "gfid-req", &gfid_req);
- if (ret == 0) {
- gf_uuid_copy (local->cont.lookup.gfid_req, gfid_req);
- dict_del (local->xattr_req, "gfid-req");
- }
- }
+afr_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+{
+ afr_local_t *local = NULL;
+ int32_t op_errno = 0;
+ int event = 0;
+ int ret = 0;
- afr_read_subvol_get (loc->parent, this, NULL, &event,
- AFR_DATA_TRANSACTION, NULL);
+ if (loc_is_nameless(loc)) {
+ if (xattr_req)
+ dict_del_sizen(xattr_req, "gfid-req");
+ afr_discover(frame, this, loc, xattr_req);
+ return 0;
+ }
- if (event != local->event_generation)
- afr_inode_refresh (frame, this, loc->parent, afr_lookup_do);
- else
- afr_lookup_do (frame, this, 0);
+ if (afr_is_private_directory(this->private, loc->parent->gfid, loc->name,
+ frame->root->pid)) {
+ op_errno = EPERM;
+ goto out;
+ }
- return 0;
-out:
- AFR_STACK_UNWIND (lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- return 0;
-}
+ if (!local->call_count) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+ local->op = GF_FOP_LOOKUP;
-/* {{{ open */
+ loc_copy(&local->loc, loc);
-afr_fd_ctx_t *
-__afr_fd_ctx_get (fd_t *fd, xlator_t *this)
-{
- uint64_t ctx = 0;
- int ret = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
+ local->inode = inode_ref(loc->inode);
- ret = __fd_ctx_get (fd, this, &ctx);
+ if (xattr_req) {
+ /* If xattr_req was null, afr_lookup_xattr_req_prepare() will
+ allocate one for us */
+ local->xattr_req = dict_copy_with_ref(xattr_req, NULL);
+ if (!local->xattr_req) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+ ret = dict_get_gfuuid(local->xattr_req, "gfid-req",
+ &local->cont.lookup.gfid_req);
+ if (ret == 0) {
+ dict_del_sizen(local->xattr_req, "gfid-req");
+ }
+ }
- if (ret < 0) {
- ret = __afr_fd_ctx_set (this, fd);
- if (ret < 0)
- goto out;
+ afr_read_subvol_get(loc->parent, this, NULL, NULL, &event,
+ AFR_DATA_TRANSACTION, NULL);
- ret = __fd_ctx_get (fd, this, &ctx);
- if (ret < 0)
- goto out;
- }
+ afr_lookup_do(frame, this, 0);
- fd_ctx = (afr_fd_ctx_t *)(long) ctx;
+ return 0;
out:
- return fd_ctx;
-}
+ AFR_STACK_UNWIND(lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ return 0;
+}
-afr_fd_ctx_t *
-afr_fd_ctx_get (fd_t *fd, xlator_t *this)
+void
+_afr_cleanup_fd_ctx(xlator_t *this, afr_fd_ctx_t *fd_ctx)
{
- afr_fd_ctx_t *fd_ctx = NULL;
+ afr_private_t *priv = this->private;
- LOCK(&fd->lock);
+ if (fd_ctx->lk_heal_info) {
+ LOCK(&priv->lock);
{
- fd_ctx = __afr_fd_ctx_get (fd, this);
+ list_del(&fd_ctx->lk_heal_info->pos);
}
- UNLOCK(&fd->lock);
-
- return fd_ctx;
+ afr_lk_heal_info_cleanup(fd_ctx->lk_heal_info);
+ fd_ctx->lk_heal_info = NULL;
+ }
+ GF_FREE(fd_ctx->opened_on);
+ GF_FREE(fd_ctx);
+ return;
}
-
int
-__afr_fd_ctx_set (xlator_t *this, fd_t *fd)
+afr_cleanup_fd_ctx(xlator_t *this, fd_t *fd)
{
- afr_private_t * priv = NULL;
- int ret = -1;
- uint64_t ctx = 0;
- afr_fd_ctx_t * fd_ctx = NULL;
- int i = 0;
-
- VALIDATE_OR_GOTO (this->private, out);
- VALIDATE_OR_GOTO (fd, out);
-
- priv = this->private;
-
- ret = __fd_ctx_get (fd, this, &ctx);
-
- if (ret == 0)
- goto out;
-
- fd_ctx = GF_CALLOC (1, sizeof (afr_fd_ctx_t),
- gf_afr_mt_afr_fd_ctx_t);
- if (!fd_ctx) {
- ret = -ENOMEM;
- goto out;
- }
+ uint64_t ctx = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int ret = 0;
- for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++) {
- fd_ctx->pre_op_done[i] = GF_CALLOC (sizeof (*fd_ctx->pre_op_done[i]),
- priv->child_count,
- gf_afr_mt_int32_t);
- if (!fd_ctx->pre_op_done[i]) {
- ret = -ENOMEM;
- goto out;
- }
- }
-
- fd_ctx->opened_on = GF_CALLOC (sizeof (*fd_ctx->opened_on),
- priv->child_count,
- gf_afr_mt_int32_t);
- if (!fd_ctx->opened_on) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (fd_is_anonymous (fd))
- fd_ctx->opened_on[i] = AFR_FD_OPENED;
- else
- fd_ctx->opened_on[i] = AFR_FD_NOT_OPENED;
- }
-
- fd_ctx->lock_piggyback = GF_CALLOC (sizeof (*fd_ctx->lock_piggyback),
- priv->child_count,
- gf_afr_mt_char);
- if (!fd_ctx->lock_piggyback) {
- ret = -ENOMEM;
- goto out;
- }
-
- fd_ctx->lock_acquired = GF_CALLOC (sizeof (*fd_ctx->lock_acquired),
- priv->child_count,
- gf_afr_mt_char);
- if (!fd_ctx->lock_acquired) {
- ret = -ENOMEM;
- goto out;
- }
-
- fd_ctx->readdir_subvol = -1;
+ ret = fd_ctx_get(fd, this, &ctx);
+ if (ret < 0)
+ goto out;
- pthread_mutex_init (&fd_ctx->delay_lock, NULL);
+ fd_ctx = (afr_fd_ctx_t *)(long)ctx;
- INIT_LIST_HEAD (&fd_ctx->eager_locked);
+ if (fd_ctx) {
+ _afr_cleanup_fd_ctx(this, fd_ctx);
+ }
- ret = __fd_ctx_set (fd, this, (uint64_t)(long) fd_ctx);
- if (ret)
- gf_msg_debug (this->name, 0,
- "failed to set fd ctx (%p)", fd);
out:
- return ret;
+ return 0;
}
-
int
-afr_fd_ctx_set (xlator_t *this, fd_t *fd)
+afr_release(xlator_t *this, fd_t *fd)
{
- int ret = -1;
-
- LOCK (&fd->lock);
- {
- ret = __afr_fd_ctx_set (this, fd);
- }
- UNLOCK (&fd->lock);
+ afr_cleanup_fd_ctx(this, fd);
- return ret;
+ return 0;
}
-/* {{{ flush */
-
-int
-afr_flush_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_fd_ctx_t *
+__afr_fd_ctx_get(fd_t *fd, xlator_t *this)
{
- afr_local_t *local = NULL;
- int call_count = -1;
+ uint64_t ctx = 0;
+ int ret = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
- local = frame->local;
-
- LOCK (&frame->lock);
- {
- if (op_ret != -1) {
- local->op_ret = op_ret;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- } else {
- local->op_errno = op_errno;
- }
- }
- UNLOCK (&frame->lock);
+ ret = __fd_ctx_get(fd, this, &ctx);
- call_count = afr_frame_return (frame);
+ if (ret < 0) {
+ ret = __afr_fd_ctx_set(this, fd);
+ if (ret < 0)
+ goto out;
- if (call_count == 0)
- AFR_STACK_UNWIND (flush, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
+ ret = __fd_ctx_get(fd, this, &ctx);
+ if (ret < 0)
+ goto out;
+ }
- return 0;
+ fd_ctx = (afr_fd_ctx_t *)(long)ctx;
+out:
+ return fd_ctx;
}
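
__afr_fd_ctx_get() above is the usual get-or-create pattern for per-fd context: look the context up, create and store it on a miss, then look it up again, with the caller holding the fd lock throughout. The standalone sketch below shows the same shape with a plain mutex and a heap-allocated struct; the names are illustrative and not the fd_ctx_get()/__fd_ctx_set() API.

```
/* Hypothetical get-or-create of a per-object context under a lock,
 * mirroring the shape of __afr_fd_ctx_get(). Not GlusterFS's fd-ctx API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct my_ctx {
    int readdir_subvol;
};

struct obj {
    pthread_mutex_t lock;
    struct my_ctx *ctx; /* lazily created context */
};

static struct my_ctx *
obj_ctx_get(struct obj *o)
{
    struct my_ctx *ctx = NULL;

    pthread_mutex_lock(&o->lock);
    if (!o->ctx) {
        ctx = calloc(1, sizeof(*ctx));
        if (ctx) {
            ctx->readdir_subvol = -1; /* sane default, as AFR does */
            o->ctx = ctx;
        }
    }
    ctx = o->ctx;
    pthread_mutex_unlock(&o->lock);
    return ctx;
}

int
main(void)
{
    struct obj o = {PTHREAD_MUTEX_INITIALIZER, NULL};
    struct my_ctx *ctx = obj_ctx_get(&o);

    if (ctx)
        printf("readdir_subvol=%d\n", ctx->readdir_subvol);
    free(o.ctx);
    return 0;
}
```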
-static int
-afr_flush_wrapper (call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+afr_fd_ctx_t *
+afr_fd_ctx_get(fd_t *fd, xlator_t *this)
{
- int i = 0;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = -1;
-
- priv = this->private;
- local = frame->local;
- call_count = local->call_count;
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_flush_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->flush,
- local->fd, xdata);
- if (!--call_count)
- break;
+ afr_fd_ctx_t *fd_ctx = NULL;
- }
- }
+ LOCK(&fd->lock);
+ {
+ fd_ctx = __afr_fd_ctx_get(fd, this);
+ }
+ UNLOCK(&fd->lock);
- return 0;
+ return fd_ctx;
}
int
-afr_flush (call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+__afr_fd_ctx_set(xlator_t *this, fd_t *fd)
{
- afr_local_t *local = NULL;
- call_stub_t *stub = NULL;
- int op_errno = ENOMEM;
+ afr_private_t *priv = NULL;
+ int ret = -1;
+ uint64_t ctx = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int i = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ VALIDATE_OR_GOTO(this->private, out);
+ VALIDATE_OR_GOTO(fd, out);
- if (!local->call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ priv = this->private;
- local->fd = fd_ref(fd);
+ ret = __fd_ctx_get(fd, this, &ctx);
- stub = fop_flush_stub (frame, afr_flush_wrapper, fd, xdata);
- if (!stub)
- goto out;
+ if (ret == 0)
+ goto out;
- afr_delayed_changelog_wake_resume (this, fd, stub);
+ fd_ctx = GF_CALLOC(1, sizeof(afr_fd_ctx_t), gf_afr_mt_afr_fd_ctx_t);
+ if (!fd_ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
- return 0;
+ fd_ctx->opened_on = GF_CALLOC(sizeof(*fd_ctx->opened_on), priv->child_count,
+ gf_afr_mt_int32_t);
+ if (!fd_ctx->opened_on) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (fd_is_anonymous(fd))
+ fd_ctx->opened_on[i] = AFR_FD_OPENED;
+ else
+ fd_ctx->opened_on[i] = AFR_FD_NOT_OPENED;
+ }
+
+ fd_ctx->readdir_subvol = -1;
+ fd_ctx->lk_heal_info = NULL;
+
+ ret = __fd_ctx_set(fd, this, (uint64_t)(long)fd_ctx);
+ if (ret)
+ gf_msg_debug(this->name, 0, "failed to set fd ctx (%p)", fd);
out:
- AFR_STACK_UNWIND (flush, frame, -1, op_errno, NULL);
- return 0;
+ if (ret && fd_ctx)
+ _afr_cleanup_fd_ctx(this, fd_ctx);
+ return ret;
}
-/* }}} */
-
+/* {{{ flush */
int
-afr_cleanup_fd_ctx (xlator_t *this, fd_t *fd)
+afr_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
{
- uint64_t ctx = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
- int ret = 0;
- int i = 0;
-
- ret = fd_ctx_get (fd, this, &ctx);
- if (ret < 0)
- goto out;
+ afr_local_t *local = NULL;
+ int call_count = -1;
- fd_ctx = (afr_fd_ctx_t *)(long) ctx;
+ local = frame->local;
- if (fd_ctx) {
- //no need to take any locks
- if (!list_empty (&fd_ctx->eager_locked))
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_INVALID_DATA, "%s: Stale "
- "Eager-lock stubs found",
- uuid_utoa (fd->inode->gfid));
-
- for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++)
- GF_FREE (fd_ctx->pre_op_done[i]);
-
- GF_FREE (fd_ctx->opened_on);
-
- GF_FREE (fd_ctx->lock_piggyback);
+ LOCK(&frame->lock);
+ {
+ if (op_ret != -1) {
+ local->op_ret = op_ret;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
+ } else {
+ local->op_errno = op_errno;
+ }
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
- GF_FREE (fd_ctx->lock_acquired);
+ if (call_count == 0)
+ AFR_STACK_UNWIND(flush, frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
- pthread_mutex_destroy (&fd_ctx->delay_lock);
+ return 0;
+}
- GF_FREE (fd_ctx);
+static int
+afr_flush_wrapper(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = -1;
+
+ priv = this->private;
+ local = frame->local;
+ call_count = local->call_count;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(frame, afr_flush_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->flush,
+ local->fd, xdata);
+ if (!--call_count)
+ break;
}
+ }
-out:
- return 0;
+ return 0;
}
-
-int
-afr_release (xlator_t *this, fd_t *fd)
+afr_local_t *
+afr_wakeup_same_fd_delayed_op(xlator_t *this, afr_lock_t *lock, fd_t *fd)
{
- afr_cleanup_fd_ctx (this, fd);
-
- return 0;
-}
+ afr_local_t *local = NULL;
+ if (lock->delay_timer) {
+ local = list_entry(lock->post_op.next, afr_local_t,
+ transaction.owner_list);
+ if (fd == local->fd) {
+ if (gf_timer_call_cancel(this->ctx, lock->delay_timer)) {
+ local = NULL;
+ } else {
+ lock->delay_timer = NULL;
+ }
+ } else {
+ local = NULL;
+ }
+ }
-/* {{{ fsync */
+ return local;
+}
-int
-afr_fsync_unwind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
-{
- AFR_STACK_UNWIND (fsync, frame, op_ret, op_errno, prebuf, postbuf,
- xdata);
- return 0;
+void
+afr_delayed_changelog_wake_resume(xlator_t *this, inode_t *inode,
+ call_stub_t *stub)
+{
+ afr_inode_ctx_t *ctx = NULL;
+ afr_lock_t *lock = NULL;
+ afr_local_t *metadata_local = NULL;
+ afr_local_t *data_local = NULL;
+ LOCK(&inode->lock);
+ {
+ (void)__afr_inode_ctx_get(this, inode, &ctx);
+ lock = &ctx->lock[AFR_DATA_TRANSACTION];
+ data_local = afr_wakeup_same_fd_delayed_op(this, lock, stub->args.fd);
+ lock = &ctx->lock[AFR_METADATA_TRANSACTION];
+ metadata_local = afr_wakeup_same_fd_delayed_op(this, lock,
+ stub->args.fd);
+ }
+ UNLOCK(&inode->lock);
+
+ if (data_local) {
+ data_local->transaction.resume_stub = stub;
+ } else if (metadata_local) {
+ metadata_local->transaction.resume_stub = stub;
+ } else {
+ call_resume(stub);
+ }
+ if (data_local) {
+ afr_delayed_changelog_wake_up_cbk(data_local);
+ }
+ if (metadata_local) {
+ afr_delayed_changelog_wake_up_cbk(metadata_local);
+ }
}
int
-afr_fsync_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
+afr_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_count = -1;
- int child_index = (long) cookie;
- int read_subvol = 0;
- call_stub_t *stub = NULL;
+ afr_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ int op_errno = ENOMEM;
- local = frame->local;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- read_subvol = afr_data_subvol_get (local->inode, this, 0, 0, NULL);
+ local->op = GF_FOP_FLUSH;
+ if (!afr_is_consistent_io_possible(local, this->private, &op_errno))
+ goto out;
- LOCK (&frame->lock);
- {
- if (op_ret == 0) {
- if (local->op_ret == -1) {
- local->op_ret = 0;
-
- local->cont.inode_wfop.prebuf = *prebuf;
- local->cont.inode_wfop.postbuf = *postbuf;
-
- if (xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
-
- if (child_index == read_subvol) {
- local->cont.inode_wfop.prebuf = *prebuf;
- local->cont.inode_wfop.postbuf = *postbuf;
- if (xdata) {
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
- local->xdata_rsp = dict_ref (xdata);
- }
- }
- } else {
- local->op_errno = op_errno;
- }
- }
- UNLOCK (&frame->lock);
-
- call_count = afr_frame_return (frame);
-
- if (call_count == 0) {
- /* Make a stub out of the frame, and register it
- with the waking up post-op. When the call-stub resumes,
- we are guaranteed that there was no post-op pending
- (i.e changelogs were unset in the server). This is an
- essential "guarantee", that fsync() returns only after
- completely finishing EVERYTHING, including the delayed
- post-op. This guarantee is expected by FUSE graph switching
- for example.
- */
- stub = fop_fsync_cbk_stub (frame, afr_fsync_unwind_cbk,
- local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf,
- local->xdata_rsp);
- if (!stub) {
- AFR_STACK_UNWIND (fsync, frame, -1, ENOMEM, 0, 0, 0);
- return 0;
- }
-
- /* If no new unstable writes happened between the
- time we cleared the unstable write witness flag in afr_fsync
- and now, calling afr_delayed_changelog_wake_up() should
- wake up and skip over the fsync phase and go straight to
- afr_changelog_post_op_now()
- */
- afr_delayed_changelog_wake_resume (this, local->fd, stub);
- }
+ local->fd = fd_ref(fd);
- return 0;
-}
+ stub = fop_flush_stub(frame, afr_flush_wrapper, fd, xdata);
+ if (!stub)
+ goto out;
+
+ afr_delayed_changelog_wake_resume(this, fd->inode, stub);
+ return 0;
+out:
+ AFR_STACK_UNWIND(flush, frame, -1, op_errno, NULL);
+ return 0;
+}
int
-afr_fsync (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
- dict_t *xdata)
+afr_fsyncdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
-
- priv = this->private;
+ afr_local_t *local = NULL;
+ int call_count = -1;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = frame->local;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- local->fd = fd_ref (fd);
+ LOCK(&frame->lock);
+ {
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
+ } else {
+ local->op_errno = op_errno;
+ }
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
- if (afr_fd_has_witnessed_unstable_write (this, fd)) {
- /* don't care. we only wanted to CLEAR the bit */
- }
+ if (call_count == 0)
+ AFR_STACK_UNWIND(fsyncdir, frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
- local->inode = inode_ref (fd->inode);
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_fsync_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->fsync,
- fd, datasync, xdata);
- if (!--call_count)
- break;
- }
+int
+afr_fsyncdir(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+ int32_t call_count = 0;
+ int32_t op_errno = ENOMEM;
+
+ priv = this->private;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_FSYNCDIR;
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
+
+ call_count = local->call_count;
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND(frame, afr_fsyncdir_cbk, priv->children[i],
+ priv->children[i]->fops->fsyncdir, fd, datasync, xdata);
+ if (!--call_count)
+ break;
}
+ }
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (fsync, frame, -1, op_errno, NULL, NULL, NULL);
+ AFR_STACK_UNWIND(fsyncdir, frame, -1, op_errno, NULL);
- return 0;
+ return 0;
}
/* }}} */
-/* {{{ fsync */
+static int
+afr_serialized_lock_wind(call_frame_t *frame, xlator_t *this);
-int
-afr_fsyncdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+static gf_boolean_t
+afr_is_conflicting_lock_present(int32_t op_ret, int32_t op_errno)
{
- afr_local_t *local = NULL;
- int call_count = -1;
+ if (op_ret == -1 && op_errno == EAGAIN)
+ return _gf_true;
+ return _gf_false;
+}
- local = frame->local;
+static void
+afr_fop_lock_unwind(call_frame_t *frame, glusterfs_fop_t op, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ switch (op) {
+ case GF_FOP_INODELK:
+ AFR_STACK_UNWIND(inodelk, frame, op_ret, op_errno, xdata);
+ break;
+ case GF_FOP_FINODELK:
+ AFR_STACK_UNWIND(finodelk, frame, op_ret, op_errno, xdata);
+ break;
+ case GF_FOP_ENTRYLK:
+ AFR_STACK_UNWIND(entrylk, frame, op_ret, op_errno, xdata);
+ break;
+ case GF_FOP_FENTRYLK:
+ AFR_STACK_UNWIND(fentrylk, frame, op_ret, op_errno, xdata);
+ break;
+ default:
+ break;
+ }
+}
- LOCK (&frame->lock);
- {
- if (op_ret == 0) {
- local->op_ret = 0;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- } else {
- local->op_errno = op_errno;
- }
- }
- UNLOCK (&frame->lock);
+static void
+afr_fop_lock_wind(call_frame_t *frame, xlator_t *this, int child_index,
+ int32_t (*lock_cbk)(call_frame_t *, void *, xlator_t *,
+ int32_t, int32_t, dict_t *))
+{
+ afr_local_t *local = frame->local;
+ afr_private_t *priv = this->private;
+ int i = child_index;
+
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->inodelk,
+ (const char *)local->cont.inodelk.volume, &local->loc,
+ local->cont.inodelk.cmd, &local->cont.inodelk.flock,
+ local->cont.inodelk.xdata);
+ break;
+ case GF_FOP_FINODELK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->finodelk,
+ (const char *)local->cont.inodelk.volume, local->fd,
+ local->cont.inodelk.cmd, &local->cont.inodelk.flock,
+ local->cont.inodelk.xdata);
+ break;
+ case GF_FOP_ENTRYLK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->entrylk, local->cont.entrylk.volume,
+ &local->loc, local->cont.entrylk.basename,
+ local->cont.entrylk.cmd, local->cont.entrylk.type,
+ local->cont.entrylk.xdata);
+ break;
+ case GF_FOP_FENTRYLK:
+ STACK_WIND_COOKIE(
+ frame, lock_cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->fentrylk, local->cont.entrylk.volume,
+ local->fd, local->cont.entrylk.basename,
+ local->cont.entrylk.cmd, local->cont.entrylk.type,
+ local->cont.entrylk.xdata);
+ break;
+ default:
+ break;
+ }
+}
- call_count = afr_frame_return (frame);
+void
+afr_fop_lock_proceed(call_frame_t *frame)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- if (call_count == 0)
- AFR_STACK_UNWIND (fsyncdir, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
+ local = frame->local;
+ priv = frame->this->private;
- return 0;
+ if (local->fop_lock_state != AFR_FOP_LOCK_PARALLEL) {
+ afr_fop_lock_unwind(frame, local->op, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return;
+ }
+ /* At least one child is up */
+ /*
+ * Non-blocking locks also need to be serialized. Otherwise there is
+ * a chance that both the mounts which issued same non-blocking inodelk
+ * may endup not acquiring the lock on any-brick.
+ * Ex: Mount1 and Mount2
+ * request for full length lock on file f1. Mount1 afr may acquire the
+ * partial lock on brick-1 and may not acquire the lock on brick-2
+ * because Mount2 already got the lock on brick-2, vice versa. Since
+ * both the mounts only got partial locks, afr treats them as failure in
+ * gaining the locks and unwinds with EAGAIN errno.
+ */
+ local->op_ret = -1;
+ local->op_errno = EUCLEAN;
+ local->fop_lock_state = AFR_FOP_LOCK_SERIAL;
+ afr_local_replies_wipe(local, priv);
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = NULL;
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.cmd = local->cont.inodelk.in_cmd;
+ local->cont.inodelk.flock = local->cont.inodelk.in_flock;
+ if (local->cont.inodelk.xdata)
+ dict_unref(local->cont.inodelk.xdata);
+ local->cont.inodelk.xdata = NULL;
+ if (local->xdata_req)
+ local->cont.inodelk.xdata = dict_ref(local->xdata_req);
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.cmd = local->cont.entrylk.in_cmd;
+ if (local->cont.entrylk.xdata)
+ dict_unref(local->cont.entrylk.xdata);
+ local->cont.entrylk.xdata = NULL;
+ if (local->xdata_req)
+ local->cont.entrylk.xdata = dict_ref(local->xdata_req);
+ break;
+ default:
+ break;
+ }
+ afr_serialized_lock_wind(frame, frame->this);
}
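
The comment in afr_fop_lock_proceed() explains why a failed parallel non-blocking lock attempt is retried serially: two clients can each win the lock on a different brick, so both see only partial success and would otherwise keep failing with EAGAIN. The toy model below walks through that scenario under stated assumptions (two bricks, two clients, plain try-lock flags); it illustrates the ordering argument only and is not AFR's locking code.

```
/* Toy model of the parallel-vs-serialized locking argument: with
 * parallel non-blocking tries, client 1 can win brick 0 while client 2
 * wins brick 1, so neither gets all bricks. If every client instead
 * tries bricks strictly in order, only the winner of brick 0 moves on
 * to brick 1. All names here are illustrative. */
#include <stdio.h>

#define BRICKS 2
static int owner[BRICKS]; /* 0 = free, otherwise client id */

static int
try_lock(int brick, int client)
{
    if (owner[brick] != 0)
        return 0; /* conflicting lock present */
    owner[brick] = client;
    return 1;
}

static int
serialized_acquire(int client)
{
    for (int b = 0; b < BRICKS; b++)
        if (!try_lock(b, client))
            return 0; /* stop at the first conflict */
    return 1;
}

int
main(void)
{
    /* Parallel case: the two clients race and split the bricks. */
    try_lock(0, 1);
    try_lock(1, 2);
    printf("parallel: client1 has brick0=%d brick1=%d -> partial\n",
           owner[0] == 1, owner[1] == 1);

    /* Serialized case: both start at brick 0; only the winner continues. */
    owner[0] = owner[1] = 0;
    int got1 = serialized_acquire(1);
    int got2 = serialized_acquire(2);
    printf("serialized: client1 got all=%d, client2 got all=%d\n", got1, got2);
    return 0;
}
```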
+static int32_t
+afr_unlock_partial_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
-int
-afr_fsyncdir (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
- dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
-
- priv = this->private;
-
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = -1;
+ int child_index = (long)cookie;
+ uuid_t gfid = {0};
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ local = frame->local;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_fsyncdir_cbk,
- priv->children[i],
- priv->children[i]->fops->fsyncdir,
- fd, datasync, xdata);
- if (!--call_count)
- break;
- }
- }
+ if (op_ret < 0 && op_errno != ENOTCONN) {
+ if (local->fd)
+ gf_uuid_copy(gfid, local->fd->inode->gfid);
+ else
+ loc_gfid(&local->loc, gfid);
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_UNLOCK_FAIL,
+ "%s: Failed to unlock %s on %s "
+ "with lk_owner: %s",
+ uuid_utoa(gfid), gf_fop_list[local->op],
+ priv->children[child_index]->name,
+ lkowner_utoa(&frame->root->lk_owner));
+ }
- return 0;
-out:
- AFR_STACK_UNWIND (fsyncdir, frame, -1, op_errno, NULL);
+ call_count = afr_frame_return(frame);
+ if (call_count == 0)
+ afr_fop_lock_proceed(frame);
- return 0;
+ return 0;
}
-/* }}} */
+static int32_t
+afr_unlock_locks_and_proceed(call_frame_t *frame, xlator_t *this,
+ int call_count)
+{
+ int i = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+
+ if (call_count == 0) {
+ afr_fop_lock_proceed(frame);
+ goto out;
+ }
+
+ local = frame->local;
+ priv = this->private;
+ local->call_count = call_count;
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.flock.l_type = F_UNLCK;
+ local->cont.inodelk.cmd = F_SETLK;
+ if (local->cont.inodelk.xdata)
+ dict_unref(local->cont.inodelk.xdata);
+ local->cont.inodelk.xdata = NULL;
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.cmd = ENTRYLK_UNLOCK;
+ if (local->cont.entrylk.xdata)
+ dict_unref(local->cont.entrylk.xdata);
+ local->cont.entrylk.xdata = NULL;
+ break;
+ default:
+ break;
+ }
-int32_t
-afr_unlock_partial_inodelk_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret,
- int32_t op_errno, dict_t *xdata)
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = -1;
- int child_index = (long)cookie;
- uuid_t gfid = {0};
+ if (local->replies[i].op_ret == -1)
+ continue;
- local = frame->local;
- priv = this->private;
+ afr_fop_lock_wind(frame, this, i, afr_unlock_partial_lock_cbk);
- if (op_ret < 0 && op_errno != ENOTCONN) {
- loc_gfid (&local->loc, gfid);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INODE_UNLOCK_FAIL,
- "%s: Failed to unlock %s "
- "with lk_owner: %s (%s)", uuid_utoa (gfid),
- priv->children[child_index]->name,
- lkowner_utoa (&frame->root->lk_owner),
- strerror (op_errno));
- }
+ if (!--call_count)
+ break;
+ }
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- AFR_STACK_UNWIND (inodelk, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
- }
-
- return 0;
+out:
+ return 0;
}
int32_t
-afr_unlock_inodelks_and_unwind (call_frame_t *frame, xlator_t *this,
- int call_count)
+afr_fop_lock_done(call_frame_t *frame, xlator_t *this)
{
- int i = 0;
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
+ int i = 0;
+ int lock_count = 0;
+ unsigned char *success = NULL;
- local = frame->local;
- priv = this->private;
- local->call_count = call_count;
- local->cont.inodelk.flock.l_type = F_UNLCK;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
+ local = frame->local;
+ priv = this->private;
+ success = alloca0(priv->child_count);
- if (local->replies[i].op_ret == -1)
- continue;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
- STACK_WIND_COOKIE (frame, afr_unlock_partial_inodelk_cbk,
- (void*) (long) i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock, 0);
+ if (local->replies[i].op_ret == 0) {
+ lock_count++;
+ success[i] = 1;
+ }
+
+ if (local->op_ret == -1 && local->op_errno == EAGAIN)
+ continue;
- if (!--call_count)
- break;
+ if ((local->replies[i].op_ret == -1) &&
+ (local->replies[i].op_errno == EAGAIN)) {
+ local->op_ret = -1;
+ local->op_errno = EAGAIN;
+ continue;
}
- return 0;
+ if (local->replies[i].op_ret == 0)
+ local->op_ret = 0;
+
+ local->op_errno = local->replies[i].op_errno;
+ }
+
+ if (afr_fop_lock_is_unlock(frame))
+ goto unwind;
+
+ if (afr_is_conflicting_lock_present(local->op_ret, local->op_errno)) {
+ afr_unlock_locks_and_proceed(frame, this, lock_count);
+ } else if (priv->quorum_count && !afr_has_quorum(success, this, NULL)) {
+ local->fop_lock_state = AFR_FOP_LOCK_QUORUM_FAILED;
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
+ if (local->op_errno == 0)
+ local->op_errno = afr_quorum_errno(priv);
+ afr_unlock_locks_and_proceed(frame, this, lock_count);
+ } else {
+ goto unwind;
+ }
+
+ return 0;
+unwind:
+ afr_fop_lock_unwind(frame, local->op, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return 0;
}
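
The new afr_fop_lock_done() above decides, once every reply is in, whether the lock is held, has to be rolled back because some brick reported a conflict (EAGAIN), or has to be failed for lack of quorum. A minimal standalone sketch of that decision in plain C; child_result, lock_done and the ENOTCONN quorum errno are illustrative stand-ins rather than GlusterFS types (the real code derives the quorum errno from the collected replies):

```
#include <errno.h>
#include <stdio.h>

/* Hypothetical per-child reply: ret == 0 means the child granted the
 * lock, ret < 0 means it failed with the given errno. */
struct child_result {
    int valid;
    int ret;
    int err;
};

/* Returns 0 if the lock is considered held; otherwise -1, with
 * *need_rollback set when already-granted child locks have to be
 * released before unwinding (the EAGAIN and quorum branches). */
static int
lock_done(const struct child_result *res, int nchildren, int quorum,
          int *need_rollback, int *op_errno)
{
    int i, granted = 0, conflicting = 0;

    *need_rollback = 0;
    for (i = 0; i < nchildren; i++) {
        if (!res[i].valid)
            continue;
        if (res[i].ret == 0)
            granted++;
        else if (res[i].err == EAGAIN)
            conflicting = 1;
    }

    if (conflicting || (quorum && granted < quorum)) {
        *need_rollback = (granted > 0);
        *op_errno = conflicting ? EAGAIN : ENOTCONN;
        return -1;
    }
    return 0;
}

int
main(void)
{
    struct child_result res[3] = {{1, 0, 0}, {1, -1, EAGAIN}, {1, 0, 0}};
    int rollback = 0, err = 0;
    int ret = lock_done(res, 3, 2, &rollback, &err);

    printf("ret=%d rollback=%d errno=%d\n", ret, rollback, err);
    return 0;
}
```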
-int32_t
-afr_inodelk_done (call_frame_t *frame, xlator_t *this)
+static int
+afr_common_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- int i = 0;
- int lock_count = 0;
-
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int child_index = (long)cookie;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (op_ret == 0 && xdata) {
+ local->replies[child_index].xdata = dict_ref(xdata);
+ LOCK(&frame->lock);
+ {
+ if (!local->xdata_rsp)
+ local->xdata_rsp = dict_ref(xdata);
+ }
+ UNLOCK(&frame->lock);
+ }
+ return 0;
+}
- if (local->replies[i].op_ret == 0)
- lock_count++;
+static int32_t
+afr_serialized_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
- if (local->op_ret == -1 && local->op_errno == EAGAIN)
- continue;
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int child_index = (long)cookie;
+ int next_child = 0;
- if ((local->replies[i].op_ret == -1) &&
- (local->replies[i].op_errno == EAGAIN)) {
- local->op_ret = -1;
- local->op_errno = EAGAIN;
- continue;
- }
+ local = frame->local;
+ priv = this->private;
- if (local->replies[i].op_ret == 0)
- local->op_ret = 0;
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
- local->op_errno = local->replies[i].op_errno;
- }
+ for (next_child = child_index + 1; next_child < priv->child_count;
+ next_child++) {
+ if (local->child_up[next_child])
+ break;
+ }
- if (lock_count && local->cont.inodelk.flock.l_type != F_UNLCK &&
- (local->op_ret == -1 && local->op_errno == EAGAIN)) {
- afr_unlock_inodelks_and_unwind (frame, this,
- lock_count);
- } else {
- AFR_STACK_UNWIND (inodelk, frame, local->op_ret,
- local->op_errno, local->xdata_rsp);
- }
+ if (afr_is_conflicting_lock_present(op_ret, op_errno) ||
+ (next_child == priv->child_count)) {
+ afr_fop_lock_done(frame, this);
+ } else {
+ afr_fop_lock_wind(frame, this, next_child, afr_serialized_lock_cbk);
+ }
- return 0;
+ return 0;
}
-int
-afr_common_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+static int
+afr_serialized_lock_wind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- int child_index = (long)cookie;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
- local = frame->local;
+ priv = this->private;
+ local = frame->local;
- local->replies[child_index].valid = 1;
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
- if (op_ret == 0 && xdata) {
- local->replies[child_index].xdata = dict_ref (xdata);
- LOCK (&frame->lock);
- {
- if (!local->xdata_rsp)
- local->xdata_rsp = dict_ref (xdata);
- }
- UNLOCK (&frame->lock);
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ afr_fop_lock_wind(frame, this, i, afr_serialized_lock_cbk);
+ break;
}
- return 0;
+ }
+ return 0;
}
static int32_t
-afr_parallel_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_parallel_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- int call_count = 0;
+ int call_count = 0;
- afr_common_inodelk_cbk (frame, cookie, this, op_ret, op_errno, xdata);
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
- call_count = afr_frame_return (frame);
- if (call_count == 0)
- afr_inodelk_done (frame, this);
+ call_count = afr_frame_return(frame);
+ if (call_count == 0)
+ afr_fop_lock_done(frame, this);
- return 0;
+ return 0;
}
-static gf_boolean_t
-afr_is_conflicting_lock_present (int32_t op_ret, int32_t op_errno)
+static int
+afr_parallel_lock_wind(call_frame_t *frame, xlator_t *this)
{
- if (op_ret == -1 && op_errno == EAGAIN)
- return _gf_true;
- return _gf_false;
-}
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int call_count = 0;
+ int i = 0;
-static int32_t
-afr_serialized_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+ priv = this->private;
+ local = frame->local;
+ call_count = local->call_count;
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int child_index = (long)cookie;
- int next_child = 0;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->child_up[i])
+ continue;
+ afr_fop_lock_wind(frame, this, i, afr_parallel_lock_cbk);
+ if (!--call_count)
+ break;
+ }
+ return 0;
+}
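
afr_serialized_lock_wind() and afr_parallel_lock_wind() above differ only in traversal: unlocks can safely fan out to every up child at once, while non-blocking lock attempts are walked one child at a time so that two competing clients cannot each end up holding complementary partial lock sets. A simplified, self-contained illustration of the two orders; try_lock here is a stand-in callback, not a GlusterFS API:

```
#include <stdbool.h>
#include <stdio.h>

typedef bool (*try_lock_fn)(int child);

/* Wind the request to every up child at once; replies are reconciled
 * later (the afr_parallel_lock_wind order, used e.g. for unlocks). */
static void
parallel_wind(const bool *child_up, int nchildren, try_lock_fn try_lock)
{
    for (int i = 0; i < nchildren; i++)
        if (child_up[i])
            try_lock(i);
}

/* Wind to one up child at a time and stop at the first conflict
 * (the afr_serialized_lock_wind order for non-blocking locks). */
static void
serialized_wind(const bool *child_up, int nchildren, try_lock_fn try_lock)
{
    for (int i = 0; i < nchildren; i++) {
        if (!child_up[i])
            continue;
        if (!try_lock(i)) /* conflicting lock: give up, roll back later */
            break;
    }
}

static bool
demo_try_lock(int child)
{
    printf("winding lock to child %d\n", child);
    return child != 1; /* pretend child 1 already holds a conflicting lock */
}

int
main(void)
{
    bool up[3] = {true, true, true};

    puts("parallel:");
    parallel_wind(up, 3, demo_try_lock);
    puts("serialized:");
    serialized_wind(up, 3, demo_try_lock);
    return 0;
}
```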
- local = frame->local;
- priv = this->private;
+static int
+afr_fop_handle_lock(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = frame->local;
+ int op_errno = 0;
- afr_common_inodelk_cbk (frame, cookie, this, op_ret, op_errno, xdata);
+ if (!afr_fop_lock_is_unlock(frame)) {
+ if (!afr_is_consistent_io_possible(local, this->private, &op_errno))
+ goto out;
- for (next_child = child_index + 1; next_child < priv->child_count;
- next_child++) {
- if (local->child_up[next_child])
- break;
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.cmd = F_SETLK;
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.cmd = ENTRYLK_LOCK_NB;
+ break;
+ default:
+ break;
}
+ }
- if (afr_is_conflicting_lock_present (op_ret, op_errno) ||
- (next_child == priv->child_count)) {
- afr_inodelk_done (frame, this);
- } else {
- STACK_WIND_COOKIE (frame, afr_serialized_inodelk_cbk,
- (void *) (long) next_child,
- priv->children[next_child],
- priv->children[next_child]->fops->inodelk,
- (const char *)local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock,
- local->xdata_req);
+ if (local->xdata_req) {
+ switch (local->op) {
+ case GF_FOP_INODELK:
+ case GF_FOP_FINODELK:
+ local->cont.inodelk.xdata = dict_ref(local->xdata_req);
+ break;
+ case GF_FOP_ENTRYLK:
+ case GF_FOP_FENTRYLK:
+ local->cont.entrylk.xdata = dict_ref(local->xdata_req);
+ break;
+ default:
+ break;
}
+ }
- return 0;
+ local->fop_lock_state = AFR_FOP_LOCK_PARALLEL;
+ afr_parallel_lock_wind(frame, this);
+out:
+ return -op_errno;
}
-static int
-afr_parallel_inodelk_wind (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int call_count = 0;
- int i = 0;
+static int32_t
+afr_handle_inodelk(call_frame_t *frame, xlator_t *this, glusterfs_fop_t fop,
+ const char *volume, loc_t *loc, fd_t *fd, int32_t cmd,
+ struct gf_flock *flock, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int32_t op_errno = ENOMEM;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = fop;
+ if (loc)
+ loc_copy(&local->loc, loc);
+ if (fd && (flock->l_type != F_UNLCK)) {
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local->fd = fd_ref(fd);
+ }
+
+ local->cont.inodelk.volume = gf_strdup(volume);
+ if (!local->cont.inodelk.volume) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->cont.inodelk.in_cmd = cmd;
+ local->cont.inodelk.cmd = cmd;
+ local->cont.inodelk.in_flock = *flock;
+ local->cont.inodelk.flock = *flock;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+
+ op_errno = -afr_fop_handle_lock(frame, frame->this);
+ if (op_errno)
+ goto out;
+ return 0;
+out:
+ afr_fop_lock_unwind(frame, fop, -1, op_errno, NULL);
- priv = this->private;
- local = frame->local;
- call_count = local->call_count;
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (!local->child_up[i])
- continue;
- STACK_WIND_COOKIE (frame, afr_parallel_inodelk_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- (const char *)local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock,
- local->xdata_req);
- if (!--call_count)
- break;
- }
- return 0;
+int32_t
+afr_inodelk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+{
+ afr_handle_inodelk(frame, this, GF_FOP_INODELK, volume, loc, NULL, cmd,
+ flock, xdata);
+ return 0;
+}
+
+int32_t
+afr_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+{
+ afr_handle_inodelk(frame, this, GF_FOP_FINODELK, volume, NULL, fd, cmd,
+ flock, xdata);
+ return 0;
}
static int
-afr_serialized_inodelk_wind (call_frame_t *frame, xlator_t *this)
+afr_handle_entrylk(call_frame_t *frame, xlator_t *this, glusterfs_fop_t fop,
+ const char *volume, loc_t *loc, fd_t *fd,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
+ afr_local_t *local = NULL;
+ int32_t op_errno = ENOMEM;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = fop;
+ if (loc)
+ loc_copy(&local->loc, loc);
+ if (fd && (cmd != ENTRYLK_UNLOCK)) {
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local->fd = fd_ref(fd);
+ }
+ local->cont.entrylk.cmd = cmd;
+ local->cont.entrylk.in_cmd = cmd;
+ local->cont.entrylk.type = type;
+ local->cont.entrylk.volume = gf_strdup(volume);
+ local->cont.entrylk.basename = gf_strdup(basename);
+ if (!local->cont.entrylk.volume || !local->cont.entrylk.basename) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+ op_errno = -afr_fop_handle_lock(frame, frame->this);
+ if (op_errno)
+ goto out;
+
+ return 0;
+out:
+ afr_fop_lock_unwind(frame, fop, -1, op_errno, NULL);
+ return 0;
+}
- priv = this->private;
- local = frame->local;
+int
+afr_entrylk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
+{
+ afr_handle_entrylk(frame, this, GF_FOP_ENTRYLK, volume, loc, NULL, basename,
+ cmd, type, xdata);
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_serialized_inodelk_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- (const char *)local->cont.inodelk.volume,
- &local->loc, local->cont.inodelk.cmd,
- &local->cont.inodelk.flock,
- local->xdata_req);
- break;
- }
- }
- return 0;
+int
+afr_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
+{
+ afr_handle_entrylk(frame, this, GF_FOP_FENTRYLK, volume, NULL, fd, basename,
+ cmd, type, xdata);
+ return 0;
}
-int32_t
-afr_inodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, int32_t cmd,
- struct gf_flock *flock, dict_t *xdata)
+int
+afr_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct statvfs *statvfs, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ int call_count = 0;
+ struct statvfs *buf = NULL;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = frame->local;
- loc_copy (&local->loc, loc);
- local->cont.inodelk.volume = gf_strdup (volume);
- if (!local->cont.inodelk.volume) {
- op_errno = ENOMEM;
- goto out;
+ LOCK(&frame->lock);
+ {
+ if (op_ret != 0) {
+ local->op_errno = op_errno;
+ goto unlock;
}
- local->cont.inodelk.cmd = cmd;
- local->cont.inodelk.flock = *flock;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
-
- /* At least one child is up */
- /*
- * Non-blocking locks also need to be serialized. Otherwise there is
- * a chance that both the mounts which issued same non-blocking inodelk
- * may endup not acquiring the lock on any-brick.
- * Ex: Mount1 and Mount2
- * request for full length lock on file f1. Mount1 afr may acquire the
- * partial lock on brick-1 and may not acquire the lock on brick-2
- * because Mount2 already got the lock on brick-2, vice versa. Since
- * both the mounts only got partial locks, afr treats them as failure in
- * gaining the locks and unwinds with EAGAIN errno.
- */
- if (flock->l_type == F_UNLCK) {
- afr_parallel_inodelk_wind (frame, this);
+ local->op_ret = op_ret;
+
+ buf = &local->cont.statfs.buf;
+ if (local->cont.statfs.buf_set) {
+ if (statvfs->f_bavail < buf->f_bavail) {
+ *buf = *statvfs;
+ if (xdata) {
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = dict_ref(xdata);
+ }
+ }
} else {
- afr_serialized_inodelk_wind (frame, this);
+ *buf = *statvfs;
+ local->cont.statfs.buf_set = 1;
+ if (xdata)
+ local->xdata_rsp = dict_ref(xdata);
}
+ }
+unlock:
+ call_count = --local->call_count;
+ UNLOCK(&frame->lock);
- return 0;
-out:
- AFR_STACK_UNWIND (inodelk, frame, -1, op_errno, NULL);
+ if (call_count == 0)
+ AFR_STACK_UNWIND(statfs, frame, local->op_ret, local->op_errno,
+ &local->cont.statfs.buf, local->xdata_rsp);
- return 0;
+ return 0;
}
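
The new afr_statfs_cbk() above keeps the reply of whichever brick reports the least free space, so the aggregated statfs never advertises more room than the most constrained replica can honour. A stripped-down sketch of that comparison, assuming plain struct statvfs values rather than AFR's frame-local state:

```
#include <stdio.h>
#include <string.h>
#include <sys/statvfs.h>

/* Keep the reply with the smallest f_bavail; the first reply always
 * wins until something smaller arrives. */
static void
aggregate_statfs(struct statvfs *agg, int *agg_set,
                 const struct statvfs *reply)
{
    if (!*agg_set || reply->f_bavail < agg->f_bavail) {
        *agg = *reply;
        *agg_set = 1;
    }
}

int
main(void)
{
    struct statvfs a, b, agg;
    int set = 0;

    memset(&a, 0, sizeof(a));
    memset(&b, 0, sizeof(b));
    a.f_bavail = 1000;
    b.f_bavail = 250;

    aggregate_statfs(&agg, &set, &a);
    aggregate_statfs(&agg, &set, &b);

    printf("advertised f_bavail = %llu\n", (unsigned long long)agg.f_bavail);
    return 0;
}
```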
+int
+afr_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_count = 0;
+ int32_t op_errno = ENOMEM;
+
+ priv = this->private;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_STATFS;
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
+
+ if (priv->arbiter_count == 1 && local->child_up[ARBITER_BRICK_INDEX])
+ local->call_count--;
+ call_count = local->call_count;
+ if (!call_count) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ if (AFR_IS_ARBITER_BRICK(priv, i))
+ continue;
+ STACK_WIND(frame, afr_statfs_cbk, priv->children[i],
+ priv->children[i]->fops->statfs, loc, xdata);
+ if (!--call_count)
+ break;
+ }
+ }
-int32_t
-afr_finodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-
-{
- afr_local_t *local = NULL;
- int call_count = -1;
+ return 0;
+out:
+ AFR_STACK_UNWIND(statfs, frame, -1, op_errno, NULL, NULL);
- local = frame->local;
+ return 0;
+}
- LOCK (&frame->lock);
- {
- if (op_ret == 0)
- local->op_ret = 0;
+int32_t
+afr_lk_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = this->private;
+ int call_count = -1;
+ int child_index = (long)cookie;
- local->op_errno = op_errno;
- }
- UNLOCK (&frame->lock);
+ local = frame->local;
- call_count = afr_frame_return (frame);
+ if (op_ret < 0 && op_errno != ENOTCONN && op_errno != EBADFD) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_UNLOCK_FAIL,
+ "gfid=%s: unlock failed on subvolume %s "
+ "with lock owner %s",
+ uuid_utoa(local->fd->inode->gfid),
+ priv->children[child_index]->name,
+ lkowner_utoa(&frame->root->lk_owner));
+ }
- if (call_count == 0)
- AFR_STACK_UNWIND (finodelk, frame, local->op_ret,
- local->op_errno, xdata);
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno, NULL,
+ local->xdata_rsp);
+ }
- return 0;
+ return 0;
}
-
int32_t
-afr_finodelk (call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
- int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+afr_lk_unlock(call_frame_t *frame, xlator_t *this)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_count = 0;
+
+ local = frame->local;
+ priv = this->private;
- priv = this->private;
+ call_count = afr_locked_nodes_count(local->cont.lk.locked_nodes,
+ priv->child_count);
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ if (call_count == 0) {
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno, NULL,
+ local->xdata_rsp);
+ return 0;
+ }
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ local->call_count = call_count;
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_finodelk_cbk,
- priv->children[i],
- priv->children[i]->fops->finodelk,
- volume, fd, cmd, flock, xdata);
-
- if (!--call_count)
- break;
- }
- }
+ local->cont.lk.user_flock.l_type = F_UNLCK;
- return 0;
-out:
- AFR_STACK_UNWIND (finodelk, frame, -1, op_errno, NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->cont.lk.locked_nodes[i]) {
+ STACK_WIND_COOKIE(frame, afr_lk_unlock_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->lk,
+ local->fd, F_SETLK, &local->cont.lk.user_flock,
+ NULL);
- return 0;
-}
+ if (!--call_count)
+ break;
+ }
+ }
+ return 0;
+}
int32_t
-afr_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_lk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_count = -1;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int child_index = -1;
- local = frame->local;
+ local = frame->local;
+ priv = this->private;
- LOCK (&frame->lock);
- {
- if (op_ret == 0)
- local->op_ret = 0;
+ child_index = (long)cookie;
- local->op_errno = op_errno;
- }
- UNLOCK (&frame->lock);
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
+ if (op_ret < 0 && op_errno == EAGAIN) {
+ local->op_ret = -1;
+ local->op_errno = EAGAIN;
- call_count = afr_frame_return (frame);
+ afr_lk_unlock(frame, this);
+ return 0;
+ }
+
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ local->op_errno = 0;
+ local->cont.lk.locked_nodes[child_index] = 1;
+ local->cont.lk.ret_flock = *lock;
+ }
+
+ child_index++;
+
+ if (child_index < priv->child_count) {
+ STACK_WIND_COOKIE(frame, afr_lk_cbk, (void *)(long)child_index,
+ priv->children[child_index],
+ priv->children[child_index]->fops->lk, local->fd,
+ local->cont.lk.cmd, &local->cont.lk.user_flock,
+ local->xdata_req);
+ } else if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lk.locked_nodes, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
- if (call_count == 0)
- AFR_STACK_UNWIND (entrylk, frame, local->op_ret,
- local->op_errno, xdata);
+ afr_lk_unlock(frame, this);
+ } else {
+ if (local->op_ret < 0)
+ local->op_errno = afr_final_errno(local, priv);
- return 0;
-}
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno,
+ &local->cont.lk.ret_flock, local->xdata_rsp);
+ }
+ return 0;
+}
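
afr_lk_cbk() above acquires the posix lock brick by brick (re-winding with child_index + 1), releases everything it obtained if any brick reports a conflicting lock, and only then applies the quorum check. The same control flow as a standalone sketch; try_lk, unlk and the ENOTCONN quorum errno are illustrative placeholders:

```
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_CHILDREN 16

/* try_lk returns 0 when the child grants the lock, or an errno value;
 * EAGAIN means a conflicting lock is already held there. */
typedef int (*try_lk_fn)(int child);
typedef void (*unlk_fn)(int child);

static void
rollback(const bool *granted, int upto, unlk_fn unlk)
{
    for (int j = 0; j < upto; j++)
        if (granted[j])
            unlk(j);
}

static int
sequential_lk(int nchildren, int quorum, try_lk_fn try_lk, unlk_fn unlk,
              int *op_errno)
{
    bool granted[MAX_CHILDREN] = {false};
    int granted_count = 0;

    if (nchildren > MAX_CHILDREN)
        nchildren = MAX_CHILDREN;

    for (int i = 0; i < nchildren; i++) {
        int err = try_lk(i);
        if (err == EAGAIN) { /* conflict: undo what we hold and give up */
            rollback(granted, i, unlk);
            *op_errno = EAGAIN;
            return -1;
        }
        if (err == 0) {
            granted[i] = true;
            granted_count++;
        }
    }

    if (quorum && granted_count < quorum) { /* not enough bricks locked */
        rollback(granted, nchildren, unlk);
        *op_errno = ENOTCONN;
        return -1;
    }
    return 0;
}

static int demo_try_lk(int child) { return child == 1 ? ENOTCONN : 0; }
static void demo_unlk(int child) { printf("unlock child %d\n", child); }

int
main(void)
{
    int err = 0;
    int ret = sequential_lk(3, 2, demo_try_lk, demo_unlk, &err);

    printf("ret=%d errno=%d\n", ret, err);
    return 0;
}
```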
int
-afr_entrylk (call_frame_t *frame, xlator_t *this, const char *volume,
- loc_t *loc, const char *basename, entrylk_cmd cmd,
- entrylk_type type, dict_t *xdata)
+afr_lk_transaction_cbk(int ret, call_frame_t *frame, void *opaque)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = 0;
+ return 0;
+}
+
+int
+afr_lk_txn_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int child_index = -1;
+
+ local = frame->local;
+ child_index = (long)cookie;
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ local->op_errno = 0;
+ local->cont.lk.locked_nodes[child_index] = 1;
+ local->cont.lk.ret_flock = *lock;
+ }
+ syncbarrier_wake(&local->barrier);
+ return 0;
+}
- priv = this->private;
+int
+afr_lk_txn_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+ dict_t *xdata)
+{
+ afr_local_t *local = frame->local;
+ afr_private_t *priv = this->private;
+ int child_index = (long)cookie;
+
+ if (op_ret < 0 && op_errno != ENOTCONN && op_errno != EBADFD) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_UNLOCK_FAIL,
+ "gfid=%s: unlock failed on subvolume %s "
+ "with lock owner %s",
+ uuid_utoa(local->fd->inode->gfid),
+ priv->children[child_index]->name,
+ lkowner_utoa(&frame->root->lk_owner));
+ }
+ return 0;
+}
+int
+afr_lk_transaction(void *opaque)
+{
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ char *wind_on = NULL;
+ int op_errno = 0;
+ int i = 0;
+ int ret = 0;
+
+ frame = (call_frame_t *)opaque;
+ local = frame->local;
+ this = frame->this;
+ priv = this->private;
+ wind_on = alloca0(priv->child_count);
+
+ if (priv->arbiter_count || priv->child_count != 3) {
+ op_errno = ENOTSUP;
+ gf_msg(frame->this->name, GF_LOG_ERROR, op_errno, AFR_MSG_LK_HEAL_DOM,
+ "%s: Lock healing supported only for replica 3 volumes.",
+ uuid_utoa(local->fd->inode->gfid));
+ goto err;
+ }
+
+ op_errno = -afr_dom_lock_acquire(frame); /* released during AFR_STACK_UNWIND */

+ if (op_errno != 0) {
+ goto err;
+ }
+ if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lk.dom_locked_nodes, this, NULL)) {
+ op_errno = afr_final_errno(local, priv);
+ goto err;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->child_up[i] && local->cont.lk.dom_locked_nodes[i])
+ wind_on[i] = 1;
+ }
+ AFR_ONLIST(wind_on, frame, afr_lk_txn_wind_cbk, lk, local->fd,
+ local->cont.lk.cmd, &local->cont.lk.user_flock,
+ local->xdata_req);
+
+ if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lk.locked_nodes, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
+ goto unlock;
+ } else {
+ if (local->cont.lk.user_flock.l_type == F_UNLCK)
+ ret = afr_remove_lock_from_saved_locks(local, this);
+ else
+ ret = afr_add_lock_to_saved_locks(frame, this);
+ if (ret) {
+ local->op_ret = -1;
+ local->op_errno = -ret;
+ goto unlock;
+ }
+ AFR_STACK_UNWIND(lk, frame, local->op_ret, local->op_errno,
+ &local->cont.lk.ret_flock, local->xdata_rsp);
+ }
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ return 0;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+unlock:
+ local->cont.lk.user_flock.l_type = F_UNLCK;
+ AFR_ONLIST(local->cont.lk.locked_nodes, frame, afr_lk_txn_unlock_cbk, lk,
+ local->fd, F_SETLK, &local->cont.lk.user_flock, NULL);
+err:
+ AFR_STACK_UNWIND(lk, frame, -1, op_errno, NULL, NULL);
+ return -1;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_entrylk_cbk,
- priv->children[i],
- priv->children[i]->fops->entrylk,
- volume, loc, basename, cmd, type, xdata);
-
- if (!--call_count)
- break;
- }
+int
+afr_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ struct gf_flock *flock, dict_t *xdata)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int ret = 0;
+ int i = 0;
+ int32_t op_errno = ENOMEM;
+
+ priv = this->private;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_LK;
+ if (!afr_lk_is_unlock(cmd, flock)) {
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
+ }
+
+ local->cont.lk.locked_nodes = GF_CALLOC(
+ priv->child_count, sizeof(*local->cont.lk.locked_nodes),
+ gf_afr_mt_char);
+
+ if (!local->cont.lk.locked_nodes) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->fd = fd_ref(fd);
+ local->cont.lk.cmd = cmd;
+ local->cont.lk.user_flock = *flock;
+ local->cont.lk.ret_flock = *flock;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+
+ if (afr_is_lock_mode_mandatory(xdata)) {
+ ret = synctask_new(this->ctx->env, afr_lk_transaction,
+ afr_lk_transaction_cbk, frame, frame);
+ if (ret) {
+ op_errno = ENOMEM;
+ goto out;
}
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(frame, afr_lk_cbk, (void *)(long)0, priv->children[i],
+ priv->children[i]->fops->lk, fd, cmd, flock,
+ local->xdata_req);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (entrylk, frame, -1, op_errno, NULL);
+ AFR_STACK_UNWIND(lk, frame, -1, op_errno, NULL, NULL);
- return 0;
+ return 0;
}
+int32_t
+afr_lease_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_lease *lease,
+ dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int call_count = -1;
+ local = frame->local;
+ call_count = afr_frame_return(frame);
-int
-afr_fentrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+ if (call_count == 0)
+ AFR_STACK_UNWIND(lease, frame, local->op_ret, local->op_errno, lease,
+ xdata);
+
+ return 0;
+}
+int32_t
+afr_lease_unlock(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- int call_count = -1;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_count = 0;
- local = frame->local;
+ local = frame->local;
+ priv = this->private;
- LOCK (&frame->lock);
- {
- if (op_ret == 0)
- local->op_ret = 0;
+ call_count = afr_locked_nodes_count(local->cont.lease.locked_nodes,
+ priv->child_count);
- local->op_errno = op_errno;
- }
- UNLOCK (&frame->lock);
+ if (call_count == 0) {
+ AFR_STACK_UNWIND(lease, frame, local->op_ret, local->op_errno,
+ &local->cont.lease.ret_lease, NULL);
+ return 0;
+ }
- call_count = afr_frame_return (frame);
+ local->call_count = call_count;
- if (call_count == 0)
- AFR_STACK_UNWIND (fentrylk, frame, local->op_ret,
- local->op_errno, xdata);
+ local->cont.lease.user_lease.cmd = GF_UNLK_LEASE;
- return 0;
-}
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->cont.lease.locked_nodes[i]) {
+ STACK_WIND(frame, afr_lease_unlock_cbk, priv->children[i],
+ priv->children[i]->fops->lease, &local->loc,
+ &local->cont.lease.user_lease, NULL);
+ if (!--call_count)
+ break;
+ }
+ }
-int
-afr_fentrylk (call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
- const char *basename, entrylk_cmd cmd, entrylk_type type,
- dict_t *xdata)
+ return 0;
+}
+
+int32_t
+afr_lease_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct gf_lease *lease, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int child_index = -1;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ child_index = (long)cookie;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ afr_common_lock_cbk(frame, cookie, this, op_ret, op_errno, xdata);
+ if (op_ret < 0 && op_errno == EAGAIN) {
+ local->op_ret = -1;
+ local->op_errno = EAGAIN;
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND (frame, afr_fentrylk_cbk,
- priv->children[i],
- priv->children[i]->fops->fentrylk,
- volume, fd, basename, cmd, type, xdata);
-
- if (!--call_count)
- break;
- }
- }
+ afr_lease_unlock(frame, this);
+ return 0;
+ }
+
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ local->op_errno = 0;
+ local->cont.lease.locked_nodes[child_index] = 1;
+ local->cont.lease.ret_lease = *lease;
+ }
+
+ child_index++;
+ if (child_index < priv->child_count) {
+ STACK_WIND_COOKIE(frame, afr_lease_cbk, (void *)(long)child_index,
+ priv->children[child_index],
+ priv->children[child_index]->fops->lease, &local->loc,
+ &local->cont.lease.user_lease, xdata);
+ } else if (priv->quorum_count &&
+ !afr_has_quorum(local->cont.lease.locked_nodes, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
- return 0;
-out:
- AFR_STACK_UNWIND (fentrylk, frame, -1, op_errno, NULL);
+ afr_lease_unlock(frame, this);
+ } else {
+ if (local->op_ret < 0)
+ local->op_errno = afr_final_errno(local, priv);
+ AFR_STACK_UNWIND(lease, frame, local->op_ret, local->op_errno,
+ &local->cont.lease.ret_lease, NULL);
+ }
- return 0;
+ return 0;
}
-
int
-afr_statfs_cbk (call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
- int op_errno, struct statvfs *statvfs, dict_t *xdata)
+afr_lease(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct gf_lease *lease, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_count = 0;
- struct statvfs *buf = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int32_t op_errno = ENOMEM;
- LOCK (&frame->lock);
- {
- local = frame->local;
-
- if (op_ret != 0) {
- local->op_errno = op_errno;
- goto unlock;
- }
-
- local->op_ret = op_ret;
-
- buf = &local->cont.statfs.buf;
- if (local->cont.statfs.buf_set) {
- if (statvfs->f_bavail < buf->f_bavail) {
- *buf = *statvfs;
- if (xdata) {
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
- local->xdata_rsp = dict_ref (xdata);
- }
- }
- } else {
- *buf = *statvfs;
- local->cont.statfs.buf_set = 1;
- if (xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
- }
-unlock:
- UNLOCK (&frame->lock);
+ priv = this->private;
- call_count = afr_frame_return (frame);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- if (call_count == 0)
- AFR_STACK_UNWIND (statfs, frame, local->op_ret, local->op_errno,
- &local->cont.statfs.buf, local->xdata_rsp);
+ local->op = GF_FOP_LEASE;
+ local->cont.lease.locked_nodes = GF_CALLOC(
+ priv->child_count, sizeof(*local->cont.lease.locked_nodes),
+ gf_afr_mt_char);
- return 0;
+ if (!local->cont.lease.locked_nodes) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ loc_copy(&local->loc, loc);
+ local->cont.lease.user_lease = *lease;
+ local->cont.lease.ret_lease = *lease;
+
+ STACK_WIND_COOKIE(frame, afr_lease_cbk, (void *)(long)0, priv->children[0],
+ priv->children[0]->fops->lease, loc, lease, xdata);
+
+ return 0;
+out:
+ AFR_STACK_UNWIND(lease, frame, -1, op_errno, NULL, NULL);
+
+ return 0;
}
+int
+afr_ipc_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int child_index = (long)cookie;
+ int call_count = 0;
+ gf_boolean_t failed = _gf_false;
+ gf_boolean_t succeeded = _gf_false;
+ int i = 0;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (xdata)
+ local->replies[child_index].xdata = dict_ref(xdata);
+
+ call_count = afr_frame_return(frame);
+ if (call_count)
+ goto out;
+ /* If any subvolume failed with an error other than ENOTCONN, return
+ * that error; otherwise return success, unless every subvolume
+ * failed.
+ * TODO: In case of failure, we need to unregister the xattrs
+ * from the other subvolumes where it succeeded (once upcall
+ * fixes the Bz-1371622)*/
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret < 0 &&
+ local->replies[i].op_errno != ENOTCONN) {
+ local->op_ret = local->replies[i].op_ret;
+ local->op_errno = local->replies[i].op_errno;
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = NULL;
+ if (local->replies[i].xdata) {
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ }
+ failed = _gf_true;
+ break;
+ }
+ if (local->replies[i].op_ret == 0) {
+ succeeded = _gf_true;
+ local->op_ret = 0;
+ local->op_errno = 0;
+ if (!local->xdata_rsp && local->replies[i].xdata) {
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ }
+ }
+ }
+
+ if (!succeeded && !failed) {
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
+ }
+
+ AFR_STACK_UNWIND(ipc, frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+
+out:
+ return 0;
+}
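
The comment in afr_ipc_cbk() above spells out the aggregation rule for upcall registration: any failure other than ENOTCONN wins, otherwise a single success is enough, and only when every brick failed is ENOTCONN returned. The same rule as a small standalone function over a hypothetical reply array:

```
#include <errno.h>
#include <stdio.h>

struct reply {
    int valid;
    int op_ret;
    int op_errno;
};

/* Aggregate per-child IPC replies: a hard failure (not ENOTCONN)
 * overrides everything, one success otherwise suffices, and only a
 * total failure yields ENOTCONN. */
static int
aggregate_ipc(const struct reply *r, int n, int *op_errno)
{
    int i, succeeded = 0;

    for (i = 0; i < n; i++) {
        if (!r[i].valid)
            continue;
        if (r[i].op_ret < 0 && r[i].op_errno != ENOTCONN) {
            *op_errno = r[i].op_errno;
            return -1;
        }
        if (r[i].op_ret == 0)
            succeeded = 1;
    }
    if (!succeeded) {
        *op_errno = ENOTCONN;
        return -1;
    }
    return 0;
}

int
main(void)
{
    struct reply r[3] = {{1, 0, 0}, {1, -1, ENOTCONN}, {1, -1, EPERM}};
    int err = 0;

    printf("ret=%d errno=%d\n", aggregate_ipc(r, 3, &err), err);
    return 0;
}
```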
int
-afr_statfs (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+afr_ipc(call_frame_t *frame, xlator_t *this, int32_t op, dict_t *xdata)
{
- afr_local_t * local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- int call_count = 0;
- int32_t op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ int32_t op_errno = -1;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int call_cnt = -1;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+
+ if (op != GF_IPC_TARGET_UPCALL)
+ goto wind_default;
- priv = this->private;
+ VALIDATE_OR_GOTO(this->private, err);
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto err;
- if (priv->arbiter_count == 1 && local->child_up[ARBITER_BRICK_INDEX])
- local->call_count--;
- call_count = local->call_count;
- if (!call_count) {
- op_errno = ENOTCONN;
- goto out;
- }
+ call_cnt = local->call_count;
+ if (xdata) {
for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- if (AFR_IS_ARBITER_BRICK(priv, i))
- continue;
- STACK_WIND (frame, afr_statfs_cbk,
- priv->children[i],
- priv->children[i]->fops->statfs,
- loc, xdata);
- if (!--call_count)
- break;
- }
+ if (dict_set_int8(xdata, priv->pending_key[i], 0) < 0)
+ goto err;
}
+ }
- return 0;
-out:
- AFR_STACK_UNWIND (statfs, frame, -1, op_errno, NULL, NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->child_up[i])
+ continue;
- return 0;
-}
+ STACK_WIND_COOKIE(frame, afr_ipc_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->ipc, op,
+ xdata);
+ if (!--call_cnt)
+ break;
+ }
+ return 0;
+err:
+ if (op_errno == -1)
+ op_errno = errno;
+ AFR_STACK_UNWIND(ipc, frame, -1, op_errno, NULL);
-int32_t
-afr_lk_unlock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
- dict_t *xdata)
-{
- afr_local_t * local = NULL;
- int call_count = -1;
+ return 0;
- local = frame->local;
- call_count = afr_frame_return (frame);
+wind_default:
+ STACK_WIND(frame, default_ipc_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ipc, op, xdata);
+ return 0;
+}
- if (call_count == 0)
- AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
- lock, xdata);
+int
+afr_forget(xlator_t *this, inode_t *inode)
+{
+ uint64_t ctx_int = 0;
+ afr_inode_ctx_t *ctx = NULL;
+ afr_spb_choice_timeout_cancel(this, inode);
+ inode_ctx_del(inode, this, &ctx_int);
+ if (!ctx_int)
return 0;
+
+ ctx = (afr_inode_ctx_t *)(uintptr_t)ctx_int;
+ afr_inode_ctx_destroy(ctx);
+ return 0;
}
+int
+afr_priv_dump(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ char key_prefix[GF_DUMP_MAX_BUF_LEN];
+ char key[GF_DUMP_MAX_BUF_LEN];
+ int i = 0;
+
+ GF_ASSERT(this);
+ priv = this->private;
+
+ GF_ASSERT(priv);
+ snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name);
+ gf_proc_dump_add_section("%s", key_prefix);
+ gf_proc_dump_write("child_count", "%u", priv->child_count);
+ for (i = 0; i < priv->child_count; i++) {
+ sprintf(key, "child_up[%d]", i);
+ gf_proc_dump_write(key, "%d", priv->child_up[i]);
+ sprintf(key, "pending_key[%d]", i);
+ gf_proc_dump_write(key, "%s", priv->pending_key[i]);
+ sprintf(key, "pending_reads[%d]", i);
+ gf_proc_dump_write(key, "%" PRId64,
+ GF_ATOMIC_GET(priv->pending_reads[i]));
+ sprintf(key, "child_latency[%d]", i);
+ gf_proc_dump_write(key, "%" PRId64, priv->child_latency[i]);
+ sprintf(key, "halo_child_up[%d]", i);
+ gf_proc_dump_write(key, "%d", priv->halo_child_up[i]);
+ }
+ gf_proc_dump_write("data_self_heal", "%d", priv->data_self_heal);
+ gf_proc_dump_write("metadata_self_heal", "%d", priv->metadata_self_heal);
+ gf_proc_dump_write("entry_self_heal", "%d", priv->entry_self_heal);
+ gf_proc_dump_write("read_child", "%d", priv->read_child);
+ gf_proc_dump_write("wait_count", "%u", priv->wait_count);
+ gf_proc_dump_write("heal-wait-queue-length", "%d", priv->heal_wait_qlen);
+ gf_proc_dump_write("heal-waiters", "%d", priv->heal_waiters);
+ gf_proc_dump_write("background-self-heal-count", "%d",
+ priv->background_self_heal_count);
+ gf_proc_dump_write("healers", "%d", priv->healers);
+ gf_proc_dump_write("read-hash-mode", "%d", priv->hash_mode);
+ gf_proc_dump_write("use-anonymous-inode", "%d", priv->use_anon_inode);
+ if (priv->quorum_count == AFR_QUORUM_AUTO) {
+ gf_proc_dump_write("quorum-type", "auto");
+ } else if (priv->quorum_count == 0) {
+ gf_proc_dump_write("quorum-type", "none");
+ } else {
+ gf_proc_dump_write("quorum-type", "fixed");
+ gf_proc_dump_write("quorum-count", "%d", priv->quorum_count);
+ }
+ gf_proc_dump_write("up", "%u", afr_has_quorum(priv->child_up, this, NULL));
+ if (priv->thin_arbiter_count) {
+ gf_proc_dump_write("ta_child_up", "%d", priv->ta_child_up);
+ gf_proc_dump_write("ta_bad_child_index", "%d",
+ priv->ta_bad_child_index);
+ gf_proc_dump_write("ta_notify_dom_lock_offset", "%" PRId64,
+ priv->ta_notify_dom_lock_offset);
+ }
+
+ return 0;
+}
-int32_t
-afr_lk_unlock (call_frame_t *frame, xlator_t *this)
+/**
+ * find_child_index - find the child's index in the array of subvolumes
+ * @this: AFR
+ * @child: child
+ */
+
+static int
+afr_find_child_index(xlator_t *this, xlator_t *child)
{
- afr_local_t * local = NULL;
- afr_private_t * priv = NULL;
- int i = 0;
- int call_count = 0;
+ afr_private_t *priv = NULL;
+ int child_count = -1;
+ int i = -1;
- local = frame->local;
- priv = this->private;
+ priv = this->private;
+ child_count = priv->child_count;
+ if (priv->thin_arbiter_count) {
+ child_count++;
+ }
- call_count = afr_locked_nodes_count (local->cont.lk.locked_nodes,
- priv->child_count);
+ for (i = 0; i < child_count; i++) {
+ if ((xlator_t *)child == priv->children[i])
+ break;
+ }
- if (call_count == 0) {
- AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
- &local->cont.lk.ret_flock, NULL);
- return 0;
- }
+ return i;
+}
+
+int
+__afr_get_up_children_count(afr_private_t *priv)
+{
+ int up_children = 0;
+ int i = 0;
- local->call_count = call_count;
+ for (i = 0; i < priv->child_count; i++)
+ if (priv->child_up[i] == 1)
+ up_children++;
- local->cont.lk.user_flock.l_type = F_UNLCK;
+ return up_children;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (local->cont.lk.locked_nodes[i]) {
- STACK_WIND (frame, afr_lk_unlock_cbk,
- priv->children[i],
- priv->children[i]->fops->lk,
- local->fd, F_SETLK,
- &local->cont.lk.user_flock, NULL);
-
- if (!--call_count)
- break;
- }
- }
+static int
+__get_heard_from_all_status(xlator_t *this)
+{
+ afr_private_t *priv = this->private;
+ int i;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!priv->last_event[i]) {
+ return 0;
+ }
+ }
+ if (priv->thin_arbiter_count && !priv->ta_child_up) {
return 0;
+ }
+ return 1;
}
-
-int32_t
-afr_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
+glusterfs_event_t
+__afr_transform_event_from_state(xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int child_index = -1;
-/* int ret = 0; */
+ int i = 0;
+ int up_children = 0;
+ afr_private_t *priv = this->private;
+ if (__get_heard_from_all_status(this))
+ /* have_heard_from_all. Let afr_notify() do the propagation. */
+ return GF_EVENT_MAXVAL;
- local = frame->local;
- priv = this->private;
+ up_children = __afr_get_up_children_count(priv);
+ /* Treat children with a pending notification as having sent a
+ * GF_EVENT_CHILD_DOWN, i.e. set the event to GF_EVENT_SOME_DESCENDENT_DOWN,
+ * as done in afr_notify(). */
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->last_event[i])
+ continue;
+ priv->last_event[i] = GF_EVENT_SOME_DESCENDENT_DOWN;
+ priv->child_up[i] = 0;
+ }
- child_index = (long) cookie;
+ if (up_children)
+ /* We received at least one child up */
+ return GF_EVENT_CHILD_UP;
+ else
+ return GF_EVENT_CHILD_DOWN;
- if (!child_went_down (op_ret, op_errno) && (op_ret == -1)) {
- local->op_ret = -1;
- local->op_errno = op_errno;
+ return GF_EVENT_MAXVAL;
+}
- afr_lk_unlock (frame, this);
- return 0;
- }
+static void
+afr_notify_cbk(void *data)
+{
+ xlator_t *this = data;
+ afr_private_t *priv = this->private;
+ glusterfs_event_t event = GF_EVENT_MAXVAL;
+ gf_boolean_t propagate = _gf_false;
+
+ LOCK(&priv->lock);
+ {
+ if (!priv->timer) {
+ /*
+ * Either child_up/child_down is already sent to parent.
+ * This is a spurious wake up.
+ */
+ goto unlock;
+ }
+ priv->timer = NULL;
+ event = __afr_transform_event_from_state(this);
+ if (event != GF_EVENT_MAXVAL)
+ propagate = _gf_true;
+ }
+unlock:
+ UNLOCK(&priv->lock);
+ if (propagate)
+ default_notify(this, event, NULL);
+}
- if (op_ret == 0) {
- local->op_ret = 0;
- local->op_errno = 0;
- local->cont.lk.locked_nodes[child_index] = 1;
- local->cont.lk.ret_flock = *lock;
- }
+static void
+__afr_launch_notify_timer(xlator_t *this, afr_private_t *priv)
+{
+ struct timespec delay = {
+ 0,
+ };
+
+ gf_msg_debug(this->name, 0, "Initiating child-down timer");
+ delay.tv_sec = 10;
+ delay.tv_nsec = 0;
+ priv->timer = gf_timer_call_after(this->ctx, delay, afr_notify_cbk, this);
+ if (priv->timer == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_TIMER_CREATE_FAIL,
+ "Cannot create timer for delayed initialization");
+ }
+}
- child_index++;
+static int
+find_best_down_child(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ int i = -1;
+ int32_t best_child = -1;
+ int64_t best_latency = INT64_MAX;
- if (child_index < priv->child_count) {
- STACK_WIND_COOKIE (frame, afr_lk_cbk, (void *) (long) child_index,
- priv->children[child_index],
- priv->children[child_index]->fops->lk,
- local->fd, local->cont.lk.cmd,
- &local->cont.lk.user_flock, xdata);
- } else if (local->op_ret == -1) {
- /* all nodes have gone down */
+ priv = this->private;
- AFR_STACK_UNWIND (lk, frame, -1, ENOTCONN,
- &local->cont.lk.ret_flock, NULL);
- } else {
- AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
- &local->cont.lk.ret_flock, NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (!priv->child_up[i] && priv->child_latency[i] >= 0 &&
+ priv->child_latency[i] < best_latency) {
+ best_child = i;
+ best_latency = priv->child_latency[i];
}
-
- return 0;
+ }
+ if (best_child >= 0) {
+ gf_msg_debug(this->name, 0,
+ "Found best down child (%d) @ %" PRId64 " ms latency",
+ best_child, best_latency);
+ }
+ return best_child;
}
-
int
-afr_lk (call_frame_t *frame, xlator_t *this,
- fd_t *fd, int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+find_worst_up_child(xlator_t *this)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t op_errno = ENOMEM;
-
- priv = this->private;
+ afr_private_t *priv = NULL;
+ int i = -1;
+ int32_t worst_child = -1;
+ int64_t worst_latency = INT64_MIN;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ priv = this->private;
- local->cont.lk.locked_nodes = GF_CALLOC (priv->child_count,
- sizeof (*local->cont.lk.locked_nodes),
- gf_afr_mt_char);
-
- if (!local->cont.lk.locked_nodes) {
- op_errno = ENOMEM;
- goto out;
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->child_up[i] && priv->child_latency[i] >= 0 &&
+ priv->child_latency[i] > worst_latency) {
+ worst_child = i;
+ worst_latency = priv->child_latency[i];
}
+ }
+ if (worst_child >= 0) {
+ gf_msg_debug(this->name, 0,
+ "Found worst up child (%d) @ %" PRId64 " ms latency",
+ worst_child, worst_latency);
+ }
+ return worst_child;
+}
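
find_best_down_child() and find_worst_up_child() above are symmetric scans over per-child ping latency, used by the halo logic to pick which brick to bring back or push out. One of the two scans as a standalone sketch, with child_up and latency passed as plain arrays for illustration:

```
#include <stdint.h>
#include <stdio.h>

/* Pick the down child with the lowest known latency, i.e. the best
 * candidate to bring back up (the find_best_down_child scan). */
static int
best_down_child(const int *child_up, const int64_t *latency_ms, int n)
{
    int best = -1;
    int64_t best_latency = INT64_MAX;

    for (int i = 0; i < n; i++) {
        if (!child_up[i] && latency_ms[i] >= 0 &&
            latency_ms[i] < best_latency) {
            best = i;
            best_latency = latency_ms[i];
        }
    }
    return best;
}

int
main(void)
{
    int up[3] = {1, 0, 0};
    int64_t lat[3] = {5, 40, 12};

    printf("best down child: %d\n", best_down_child(up, lat, 3));
    return 0;
}
```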
- local->fd = fd_ref (fd);
- local->cont.lk.cmd = cmd;
- local->cont.lk.user_flock = *flock;
- local->cont.lk.ret_flock = *flock;
+void
+__afr_handle_ping_event(xlator_t *this, xlator_t *child_xlator, const int idx,
+ int64_t halo_max_latency_msec, int32_t *event,
+ int64_t child_latency_msec)
+{
+ afr_private_t *priv = NULL;
+ int up_children = 0;
- STACK_WIND_COOKIE (frame, afr_lk_cbk, (void *) (long) 0,
- priv->children[i],
- priv->children[i]->fops->lk,
- fd, cmd, flock, xdata);
+ priv = this->private;
- return 0;
-out:
- AFR_STACK_UNWIND (lk, frame, -1, op_errno, NULL, NULL);
+ priv->child_latency[idx] = child_latency_msec;
+ gf_msg_debug(child_xlator->name, 0, "Client ping @ %" PRId64 " ms",
+ child_latency_msec);
+ if (priv->shd.iamshd)
+ return;
- return 0;
+ up_children = __afr_get_up_children_count(priv);
+
+ if (child_latency_msec > halo_max_latency_msec &&
+ priv->child_up[idx] == 1 && up_children > priv->halo_min_replicas) {
+ if ((up_children - 1) < priv->halo_min_replicas) {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Overriding halo threshold, "
+ "min replicas: %d",
+ priv->halo_min_replicas);
+ } else {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Child latency (%" PRId64
+ " ms) "
+ "exceeds halo threshold (%" PRId64
+ "), "
+ "marking child down.",
+ child_latency_msec, halo_max_latency_msec);
+ if (priv->halo_child_up[idx]) {
+ *event = GF_EVENT_CHILD_DOWN;
+ }
+ }
+ } else if (child_latency_msec < halo_max_latency_msec &&
+ priv->child_up[idx] == 0) {
+ if (up_children < priv->halo_max_replicas) {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Child latency (%" PRId64
+ " ms) "
+ "below halo threshold (%" PRId64
+ "), "
+ "marking child up.",
+ child_latency_msec, halo_max_latency_msec);
+ if (priv->halo_child_up[idx]) {
+ *event = GF_EVENT_CHILD_UP;
+ }
+ } else {
+ gf_log(child_xlator->name, GF_LOG_INFO,
+ "Not marking child %d up, "
+ "max replicas (%d) reached.",
+ idx, priv->halo_max_replicas);
+ }
+ }
}
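
__afr_handle_ping_event() above turns a latency sample into a child up/down decision: a child drifting above the halo maximum is marked down only while enough replicas stay up, and a child dropping back under the threshold is marked up only while the replica ceiling has not been reached. A condensed sketch of those threshold checks, ignoring the halo_child_up bookkeeping and logging:

```
#include <stdint.h>
#include <stdio.h>

enum halo_event { HALO_NONE, HALO_CHILD_UP, HALO_CHILD_DOWN };

/* Decide whether a ping sample should flip the child's state, given the
 * halo latency ceiling and the min/max replica bounds. */
static enum halo_event
halo_ping(int64_t latency_ms, int64_t max_latency_ms, int child_is_up,
          int up_children, int min_replicas, int max_replicas)
{
    if (latency_ms > max_latency_ms && child_is_up &&
        up_children > min_replicas) {
        if ((up_children - 1) >= min_replicas)
            return HALO_CHILD_DOWN;
    } else if (latency_ms < max_latency_ms && !child_is_up &&
               up_children < max_replicas) {
        return HALO_CHILD_UP;
    }
    return HALO_NONE;
}

int
main(void)
{
    printf("%d\n", halo_ping(120, 50, 1, 3, 2, 3)); /* HALO_CHILD_DOWN */
    printf("%d\n", halo_ping(20, 50, 0, 2, 2, 3));  /* HALO_CHILD_UP */
    return 0;
}
```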
-int
-afr_forget (xlator_t *this, inode_t *inode)
+static int64_t
+afr_get_halo_latency(xlator_t *this)
{
- uint64_t ctx_int = 0;
- afr_inode_ctx_t *ctx = NULL;
+ afr_private_t *priv = NULL;
+ int64_t halo_max_latency_msec = 0;
- afr_spb_choice_timeout_cancel (this, inode);
- inode_ctx_del (inode, this, &ctx_int);
- if (!ctx_int)
- return 0;
+ priv = this->private;
- ctx = (afr_inode_ctx_t *)ctx_int;
- GF_FREE (ctx);
- return 0;
+ if (priv->shd.iamshd) {
+ halo_max_latency_msec = priv->shd.halo_max_latency_msec;
+ } else if (priv->nfsd.iamnfsd) {
+ halo_max_latency_msec = priv->nfsd.halo_max_latency_msec;
+ } else {
+ halo_max_latency_msec = priv->halo_max_latency_msec;
+ }
+ gf_msg_debug(this->name, 0, "Using halo latency %" PRId64,
+ halo_max_latency_msec);
+ return halo_max_latency_msec;
}
-int
-afr_priv_dump (xlator_t *this)
-{
- afr_private_t *priv = NULL;
- char key_prefix[GF_DUMP_MAX_BUF_LEN];
- char key[GF_DUMP_MAX_BUF_LEN];
- int i = 0;
+void
+__afr_handle_child_up_event(xlator_t *this, xlator_t *child_xlator,
+ const int idx, int64_t child_latency_msec,
+ int32_t *event, int32_t *call_psh,
+ int32_t *up_child)
+{
+ afr_private_t *priv = NULL;
+ int up_children = 0;
+ int worst_up_child = -1;
+ int64_t halo_max_latency_msec = afr_get_halo_latency(this);
+
+ priv = this->private;
+
+ /*
+ * This only really counts if the child was never up
+ * (value = -1) or had been down (value = 0). See
+ * comment at GF_EVENT_CHILD_DOWN for a more detailed
+ * explanation.
+ */
+ if (priv->child_up[idx] != 1) {
+ priv->event_generation++;
+ }
+ priv->child_up[idx] = 1;
+
+ *call_psh = 1;
+ *up_child = idx;
+ up_children = __afr_get_up_children_count(priv);
+ /*
+ * If this is an _actual_ CHILD_UP event, we
+ * want to set the child_latency to MAX to indicate
+ * the child needs ping data to be available before doing child-up
+ */
+ if (!priv->halo_enabled)
+ goto out;
+
+ if (child_latency_msec < 0) {
+ /*set to INT64_MAX-1 so that it is found for best_down_child*/
+ priv->halo_child_up[idx] = 1;
+ if (priv->child_latency[idx] < 0) {
+ priv->child_latency[idx] = AFR_HALO_MAX_LATENCY;
+ }
+ }
+
+ /*
+ * Handle the edge case where we exceed
+ * halo_min_replicas and we've got a child which is
+ * marked up as it was helping to satisfy the
+ * halo_min_replicas even though its latency exceeds
+ * halo_max_latency_msec.
+ */
+ if (up_children > priv->halo_min_replicas) {
+ worst_up_child = find_worst_up_child(this);
+ if (worst_up_child >= 0 &&
+ priv->child_latency[worst_up_child] > halo_max_latency_msec) {
+ gf_msg_debug(this->name, 0,
+ "Marking child %d down, "
+ "doesn't meet halo threshold (%" PRId64
+ "), and > "
+ "halo_min_replicas (%d)",
+ worst_up_child, halo_max_latency_msec,
+ priv->halo_min_replicas);
+ priv->child_up[worst_up_child] = 0;
+ up_children--;
+ }
+ }
+
+ if (up_children > priv->halo_max_replicas && !priv->shd.iamshd) {
+ worst_up_child = find_worst_up_child(this);
+ if (worst_up_child < 0) {
+ worst_up_child = idx;
+ }
+ priv->child_up[worst_up_child] = 0;
+ up_children--;
+ gf_msg_debug(this->name, 0,
+ "Marking child %d down, "
+ "up_children (%d) > halo_max_replicas (%d)",
+ worst_up_child, up_children, priv->halo_max_replicas);
+ }
+out:
+ if (up_children == 1) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SUBVOL_UP,
+ "Subvolume '%s' came back up; "
+ "going online.",
+ child_xlator->name);
+ gf_event(EVENT_AFR_SUBVOL_UP, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ } else {
+ *event = GF_EVENT_SOME_DESCENDENT_UP;
+ }
+ priv->last_event[idx] = *event;
+}
- GF_ASSERT (this);
- priv = this->private;
+void
+__afr_handle_child_down_event(xlator_t *this, xlator_t *child_xlator, int idx,
+ int64_t child_latency_msec, int32_t *event,
+ int32_t *call_psh, int32_t *up_child)
+{
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int up_children = 0;
+ int down_children = 0;
+ int best_down_child = -1;
+
+ priv = this->private;
+
+ /*
+ * If a brick is down when we start, we'll get a
+ * CHILD_DOWN to indicate its initial state. There
+ * was never a CHILD_UP in this case, so if we
+ * increment "down_count" the difference between that
+ * and "up_count" will no longer be the number of
+ * children that are currently up. This has serious
+ * implications e.g. for quorum enforcement, so we
+ * don't increment these values unless the event
+ * represents an actual state transition between "up"
+ * (value = 1) and anything else.
+ */
+ if (priv->child_up[idx] == 1) {
+ priv->event_generation++;
+ }
+
+ /*
+ * If this is an _actual_ CHILD_DOWN event, we
+ * want to set the child_latency to < 0 to indicate
+ * the child is really disconnected.
+ */
+ if (child_latency_msec < 0) {
+ priv->child_latency[idx] = child_latency_msec;
+ priv->halo_child_up[idx] = 0;
+ }
+ priv->child_up[idx] = 0;
+
+ up_children = __afr_get_up_children_count(priv);
+ /*
+ * Handle the edge case where we need to find the
+ * next best child (to mark up) as marking this child
+ * down would cause us to fall below halo_min_replicas.
+ * We will also force the SHD to heal this child _now_
+ * as we want it to be up to date if we are going to
+ * begin using it synchronously.
+ */
+ if (priv->halo_enabled && up_children < priv->halo_min_replicas) {
+ best_down_child = find_best_down_child(this);
+ if (best_down_child >= 0) {
+ gf_msg_debug(this->name, 0,
+ "Swapping out child %d for "
+ "child %d to satisfy halo_min_replicas (%d).",
+ idx, best_down_child, priv->halo_min_replicas);
+ priv->child_up[best_down_child] = 1;
+ *call_psh = 1;
+ *up_child = best_down_child;
+ }
+ }
+ for (i = 0; i < priv->child_count; i++)
+ if (priv->child_up[i] == 0)
+ down_children++;
+ if (down_children == priv->child_count) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SUBVOLS_DOWN,
+ "All subvolumes are down. Going "
+ "offline until at least one of them "
+ "comes back up.");
+ gf_event(EVENT_AFR_SUBVOLS_DOWN, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ } else {
+ *event = GF_EVENT_SOME_DESCENDENT_DOWN;
+ }
+ priv->last_event[idx] = *event;
+}
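
The long comment in __afr_handle_child_down_event() above explains why only a real up-to-down transition may bump the event generation: a brick that was already down at startup sends CHILD_DOWN without ever having sent CHILD_UP, and counting it would skew quorum accounting. A tiny sketch of that guard, with plain ints standing in for priv->child_up and priv->event_generation:

```
#include <stdio.h>

/* Bump the generation only on an actual up-to-down transition; a child
 * that was never up (-1) or already down (0) leaves it untouched. */
static void
mark_child_down(int *child_up, int *event_generation, int idx)
{
    if (child_up[idx] == 1)
        (*event_generation)++;
    child_up[idx] = 0;
}

int
main(void)
{
    int child_up[3] = {-1, 1, 1}; /* child 0 never came up */
    int gen = 0;

    mark_child_down(child_up, &gen, 0); /* no transition: gen stays 0 */
    mark_child_down(child_up, &gen, 1); /* real transition: gen becomes 1 */
    printf("event_generation = %d\n", gen);
    return 0;
}
```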
- GF_ASSERT (priv);
- snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name);
- gf_proc_dump_add_section(key_prefix);
- gf_proc_dump_write("child_count", "%u", priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- sprintf (key, "child_up[%d]", i);
- gf_proc_dump_write(key, "%d", priv->child_up[i]);
- sprintf (key, "pending_key[%d]", i);
- gf_proc_dump_write(key, "%s", priv->pending_key[i]);
- }
- gf_proc_dump_write("data_self_heal", "%s", priv->data_self_heal);
- gf_proc_dump_write("metadata_self_heal", "%d", priv->metadata_self_heal);
- gf_proc_dump_write("entry_self_heal", "%d", priv->entry_self_heal);
- gf_proc_dump_write("data_change_log", "%d", priv->data_change_log);
- gf_proc_dump_write("metadata_change_log", "%d", priv->metadata_change_log);
- gf_proc_dump_write("entry-change_log", "%d", priv->entry_change_log);
- gf_proc_dump_write("read_child", "%d", priv->read_child);
- gf_proc_dump_write("favorite_child", "%d", priv->favorite_child);
- gf_proc_dump_write("wait_count", "%u", priv->wait_count);
- gf_proc_dump_write("quorum-reads", "%d", priv->quorum_reads);
- gf_proc_dump_write("heal-wait-queue-length", "%d",
- priv->heal_wait_qlen);
- gf_proc_dump_write("heal-waiters", "%d", priv->heal_waiters);
- gf_proc_dump_write("background-self-heal-count", "%d",
- priv->background_self_heal_count);
- gf_proc_dump_write("healers", "%d", priv->healers);
+void
+afr_ta_lock_release_synctask(xlator_t *this)
+{
+ call_frame_t *ta_frame = NULL;
+ int ret = 0;
- return 0;
+ ta_frame = afr_ta_frame_create(this);
+ if (!ta_frame) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to create ta_frame");
+ return;
+ }
+
+ ret = synctask_new(this->ctx->env, afr_release_notify_lock_for_ta,
+ afr_ta_lock_release_done, ta_frame, this);
+ if (ret) {
+ STACK_DESTROY(ta_frame->root);
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to release "
+ "AFR_TA_DOM_NOTIFY lock.");
+ }
}
+static void
+afr_handle_inodelk_contention(xlator_t *this, struct gf_upcall *upcall)
+{
+ struct gf_upcall_inodelk_contention *lc = NULL;
+ unsigned int inmem_count = 0;
+ unsigned int onwire_count = 0;
+ afr_private_t *priv = this->private;
-/**
- * find_child_index - find the child's index in the array of subvolumes
- * @this: AFR
- * @child: child
- */
+ lc = upcall->data;
-static int
-find_child_index (xlator_t *this, xlator_t *child)
-{
- afr_private_t *priv = NULL;
- int i = -1;
+ if (strcmp(lc->domain, AFR_TA_DOM_NOTIFY) != 0)
+ return;
- priv = this->private;
+ if (priv->shd.iamshd) {
+ /* shd should ignore AFR_TA_DOM_NOTIFY release requests. */
+ return;
+ }
+ LOCK(&priv->lock);
+ {
+ if (priv->release_ta_notify_dom_lock == _gf_true) {
+ /* Ignore multiple release requests from shds.*/
+ UNLOCK(&priv->lock);
+ return;
+ }
+ priv->release_ta_notify_dom_lock = _gf_true;
+ inmem_count = priv->ta_in_mem_txn_count;
+ onwire_count = priv->ta_on_wire_txn_count;
+ }
+ UNLOCK(&priv->lock);
+ if (inmem_count || onwire_count)
+ /* lock release will happen in txn code path after
+ * in-memory or on-wire txns are over.*/
+ return;
- for (i = 0; i < priv->child_count; i++) {
- if ((xlator_t *) child == priv->children[i])
- break;
- }
+ afr_ta_lock_release_synctask(this);
+}
- return i;
+static void
+afr_handle_upcall_event(xlator_t *this, struct gf_upcall *upcall)
+{
+ struct gf_upcall_cache_invalidation *up_ci = NULL;
+ afr_private_t *priv = this->private;
+ inode_t *inode = NULL;
+ inode_table_t *itable = NULL;
+ int i = 0;
+
+ switch (upcall->event_type) {
+ case GF_UPCALL_INODELK_CONTENTION:
+ afr_handle_inodelk_contention(this, upcall);
+ break;
+ case GF_UPCALL_CACHE_INVALIDATION:
+ up_ci = (struct gf_upcall_cache_invalidation *)upcall->data;
+
+ /* Since md-cache aggressively filters lookups, the stale-read
+ * problem becomes more pronounced. Hence, when a pending xattr is
+ * set, notify all md-cache clients to invalidate their cached stat
+ * data and send a fresh lookup next time. */
+ if (!up_ci->dict)
+ break;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!dict_get(up_ci->dict, priv->pending_key[i]))
+ continue;
+ up_ci->flags |= UP_INVAL_ATTR;
+ itable = ((xlator_t *)this->graph->top)->itable;
+ /*Internal processes may not have itable for
+ *top xlator*/
+ if (itable)
+ inode = inode_find(itable, upcall->gfid);
+ if (inode)
+ afr_inode_need_refresh_set(inode, this);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
}
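
afr_handle_upcall_event() above widens a cache-invalidation upcall with UP_INVAL_ATTR whenever the invalidation dict carries one of AFR's pending-change xattrs, so md-cache clients drop their cached stat data and re-lookup. A self-contained sketch of that key scan; the xattr names and the local UP_INVAL_ATTR definition are illustrative placeholders for priv->pending_key[] and the upcall flag:

```
#include <stdio.h>
#include <string.h>

#define UP_INVAL_ATTR 0x1 /* placeholder for the real upcall flag */

/* If any key in the invalidation matches one of the per-child pending
 * xattr names, widen the flags so cached attributes are dropped too. */
static int
upcall_flags(const char **xattr_keys, int nkeys, const char **pending_key,
             int nchildren, int flags)
{
    for (int i = 0; i < nchildren; i++)
        for (int k = 0; k < nkeys; k++)
            if (strcmp(xattr_keys[k], pending_key[i]) == 0)
                return flags | UP_INVAL_ATTR;
    return flags;
}

int
main(void)
{
    const char *keys[] = {"trusted.afr.testvol-client-1"};
    const char *pending[] = {"trusted.afr.testvol-client-0",
                             "trusted.afr.testvol-client-1"};

    printf("flags = %d\n", upcall_flags(keys, 1, pending, 2, 0));
    return 0;
}
```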
int32_t
-afr_notify (xlator_t *this, int32_t event,
- void *data, void *data2)
-{
- afr_private_t *priv = NULL;
- int i = -1;
- int up_children = 0;
- int down_children = 0;
- int propagate = 0;
- int had_heard_from_all = 0;
- int have_heard_from_all = 0;
- int idx = -1;
- int ret = -1;
- int call_psh = 0;
- dict_t *input = NULL;
- dict_t *output = NULL;
- gf_boolean_t had_quorum = _gf_false;
- gf_boolean_t has_quorum = _gf_false;
-
- priv = this->private;
-
- if (!priv)
- return 0;
-
- /*
- * We need to reset this in case children come up in "staggered"
- * fashion, so that we discover a late-arriving local subvolume. Note
- * that we could end up issuing N lookups to the first subvolume, and
- * O(N^2) overall, but N is small for AFR so it shouldn't be an issue.
- */
- priv->did_discovery = _gf_false;
+afr_notify(xlator_t *this, int32_t event, void *data, void *data2)
+{
+ afr_private_t *priv = NULL;
+ xlator_t *child_xlator = NULL;
+ int i = -1;
+ int propagate = 0;
+ int had_heard_from_all = 0;
+ int have_heard_from_all = 0;
+ int idx = -1;
+ int ret = -1;
+ int call_psh = 0;
+ int up_child = -1;
+ dict_t *input = NULL;
+ dict_t *output = NULL;
+ gf_boolean_t had_quorum = _gf_false;
+ gf_boolean_t has_quorum = _gf_false;
+ int64_t halo_max_latency_msec = 0;
+ int64_t child_latency_msec = -1;
+
+ child_xlator = (xlator_t *)data;
+
+ priv = this->private;
+
+ if (!priv)
+ return 0;
- had_heard_from_all = 1;
- for (i = 0; i < priv->child_count; i++) {
- if (!priv->last_event[i]) {
- had_heard_from_all = 0;
- }
+ /*
+ * We need to reset this in case children come up in "staggered"
+ * fashion, so that we discover a late-arriving local subvolume. Note
+ * that we could end up issuing N lookups to the first subvolume, and
+ * O(N^2) overall, but N is small for AFR so it shouldn't be an issue.
+ */
+ priv->did_discovery = _gf_false;
+
+ /* parent xlators don't need to know about every child_up, child_down
+ * because of afr ha. If all subvolumes go down, child_down has
+ * to be triggered. In that state when 1 subvolume comes up child_up
+ * needs to be triggered. dht optimizes revalidate lookup by sending
+ * it only to one of its subvolumes. When child up/down happens
+ * for afr's subvolumes dht should be notified by child_modified. The
+ * subsequent revalidate lookup happens on all the dht's subvolumes
+ * which triggers afr self-heals if any.
+ */
+ idx = afr_find_child_index(this, child_xlator);
+ if (idx < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_CHILD_UP,
+ "Received child_up from invalid subvolume");
+ goto out;
+ }
+
+ had_quorum = priv->quorum_count &&
+ afr_has_quorum(priv->child_up, this, NULL);
+ if (event == GF_EVENT_CHILD_PING) {
+ child_latency_msec = (int64_t)(uintptr_t)data2;
+ if (priv->halo_enabled) {
+ halo_max_latency_msec = afr_get_halo_latency(this);
+
+ /* Calculates the child latency and sets event
+ */
+ LOCK(&priv->lock);
+ {
+ __afr_handle_ping_event(this, child_xlator, idx,
+ halo_max_latency_msec, &event,
+ child_latency_msec);
+ }
+ UNLOCK(&priv->lock);
+ } else {
+ LOCK(&priv->lock);
+ {
+ priv->child_latency[idx] = child_latency_msec;
+ }
+ UNLOCK(&priv->lock);
}
+ }
- /* parent xlators dont need to know about every child_up, child_down
- * because of afr ha. If all subvolumes go down, child_down has
- * to be triggered. In that state when 1 subvolume comes up child_up
- * needs to be triggered. dht optimizes revalidate lookup by sending
- * it only to one of its subvolumes. When child up/down happens
- * for afr's subvolumes dht should be notified by child_modified. The
- * subsequent revalidate lookup happens on all the dht's subvolumes
- * which triggers afr self-heals if any.
+ if (event == GF_EVENT_CHILD_PING) {
+ /* This is the only xlator that handles PING; no reason to
+ * propagate.
*/
- idx = find_child_index (this, data);
- if (idx < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_CHILD_UP,
- "Received child_up from invalid subvolume");
- goto out;
+ goto out;
+ }
+
+ if (event == GF_EVENT_TRANSLATOR_OP) {
+ LOCK(&priv->lock);
+ {
+ had_heard_from_all = __get_heard_from_all_status(this);
+ }
+ UNLOCK(&priv->lock);
+
+ if (!had_heard_from_all) {
+ ret = -1;
+ } else {
+ input = data;
+ output = data2;
+ ret = afr_xl_op(this, input, output);
}
+ goto out;
+ }
- had_quorum = priv->quorum_count && afr_has_quorum (priv->child_up,
- this);
+ if (event == GF_EVENT_UPCALL) {
+ afr_handle_upcall_event(this, data);
+ }
+
+ LOCK(&priv->lock);
+ {
+ had_heard_from_all = __get_heard_from_all_status(this);
switch (event) {
- case GF_EVENT_CHILD_UP:
- LOCK (&priv->lock);
- {
- /*
- * This only really counts if the child was never up
- * (value = -1) or had been down (value = 0). See
- * comment at GF_EVENT_CHILD_DOWN for a more detailed
- * explanation.
- */
- if (priv->child_up[idx] != 1) {
- priv->up_count++;
- priv->event_generation++;
- }
- priv->child_up[idx] = 1;
-
- call_psh = 1;
- for (i = 0; i < priv->child_count; i++)
- if (priv->child_up[i] == 1)
- up_children++;
- if (up_children == 1) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_SUBVOL_UP,
- "Subvolume '%s' came back up; "
- "going online.", ((xlator_t *)data)->name);
- } else {
- event = GF_EVENT_CHILD_MODIFIED;
- }
-
- priv->last_event[idx] = event;
+ case GF_EVENT_PARENT_UP:
+ __afr_launch_notify_timer(this, priv);
+ propagate = 1;
+ break;
+ case GF_EVENT_CHILD_UP:
+ if (priv->thin_arbiter_count &&
+ (idx == AFR_CHILD_THIN_ARBITER)) {
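+ /* The thin-arbiter brick is not a data child; just record that it
+ * is up and bump its event generation. */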
+ priv->ta_child_up = 1;
+ priv->ta_event_gen++;
+ break;
}
- UNLOCK (&priv->lock);
-
+ __afr_handle_child_up_event(this, child_xlator, idx,
+ child_latency_msec, &event,
+ &call_psh, &up_child);
+ __afr_lock_heal_synctask(this, priv, idx);
break;
- case GF_EVENT_CHILD_DOWN:
- LOCK (&priv->lock);
- {
- /*
- * If a brick is down when we start, we'll get a
- * CHILD_DOWN to indicate its initial state. There
- * was never a CHILD_UP in this case, so if we
- * increment "down_count" the difference between than
- * and "up_count" will no longer be the number of
- * children that are currently up. This has serious
- * implications e.g. for quorum enforcement, so we
- * don't increment these values unless the event
- * represents an actual state transition between "up"
- * (value = 1) and anything else.
- */
- if (priv->child_up[idx] == 1) {
- priv->down_count++;
- priv->event_generation++;
- }
- priv->child_up[idx] = 0;
-
- for (i = 0; i < priv->child_count; i++)
- if (priv->child_up[i] == 0)
- down_children++;
- if (down_children == priv->child_count) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_ALL_SUBVOLS_DOWN,
- "All subvolumes are down. Going offline "
- "until atleast one of them comes back up.");
- } else {
- event = GF_EVENT_SOME_CHILD_DOWN;
- }
-
- priv->last_event[idx] = event;
+ case GF_EVENT_CHILD_DOWN:
+ if (priv->thin_arbiter_count &&
+ (idx == AFR_CHILD_THIN_ARBITER)) {
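+ /* Thin-arbiter brick went down: record it, bump its event
+ * generation and invalidate the cached thin-arbiter state. */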
+ priv->ta_child_up = 0;
+ priv->ta_event_gen++;
+ afr_ta_locked_priv_invalidate(priv);
+ break;
}
- UNLOCK (&priv->lock);
-
+ __afr_handle_child_down_event(this, child_xlator, idx,
+ child_latency_msec, &event,
+ &call_psh, &up_child);
+ __afr_mark_pending_lk_heal(this, priv, idx);
break;
- case GF_EVENT_CHILD_CONNECTING:
- LOCK (&priv->lock);
- {
- priv->last_event[idx] = event;
- }
- UNLOCK (&priv->lock);
+ case GF_EVENT_CHILD_CONNECTING:
+ priv->last_event[idx] = event;
break;
- case GF_EVENT_TRANSLATOR_OP:
- input = data;
- output = data2;
- if (!had_heard_from_all) {
- ret = -1;
- goto out;
- }
- ret = afr_xl_op (this, input, output);
- goto out;
+ case GF_EVENT_SOME_DESCENDENT_DOWN:
+ priv->last_event[idx] = event;
break;
-
- default:
+ default:
propagate = 1;
break;
}
-
- if (priv->quorum_count) {
- has_quorum = afr_has_quorum (priv->child_up, this);
- if (!had_quorum && has_quorum)
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_QUORUM_MET,
- "Client-quorum is met");
- if (had_quorum && !has_quorum)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_QUORUM_FAIL,
- "Client-quorum is not met");
- }
-
- /* have all subvolumes reported status once by now? */
- have_heard_from_all = 1;
- for (i = 0; i < priv->child_count; i++) {
- if (!priv->last_event[i])
- have_heard_from_all = 0;
- }
-
- /* if all subvols have reported status, no need to hide anything
- or wait for anything else. Just propagate blindly */
- if (have_heard_from_all)
- propagate = 1;
-
+ have_heard_from_all = __get_heard_from_all_status(this);
if (!had_heard_from_all && have_heard_from_all) {
- /* This is the first event which completes aggregation
- of events from all subvolumes. If at least one subvol
- had come up, propagate CHILD_UP, but only this time
- */
- event = GF_EVENT_CHILD_DOWN;
-
- LOCK (&priv->lock);
- {
- up_children = AFR_COUNT (priv->child_up, priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- if (priv->last_event[i] == GF_EVENT_CHILD_UP) {
- event = GF_EVENT_CHILD_UP;
- break;
- }
-
- if (priv->last_event[i] ==
- GF_EVENT_CHILD_CONNECTING) {
- event = GF_EVENT_CHILD_CONNECTING;
- /* continue to check other events for CHILD_UP */
- }
- }
+ if (priv->timer) {
+ gf_timer_call_cancel(this->ctx, priv->timer);
+ priv->timer = NULL;
+ }
+ /* This is the first event which completes aggregation
+ of events from all subvolumes. If at least one subvol
+ had come up, propagate CHILD_UP, but only this time
+ */
+ event = GF_EVENT_CHILD_DOWN;
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->last_event[i] == GF_EVENT_CHILD_UP) {
+ event = GF_EVENT_CHILD_UP;
+ break;
}
- UNLOCK (&priv->lock);
- }
- ret = 0;
- if (propagate)
- ret = default_notify (this, event, data);
-
- if ((!had_heard_from_all) || call_psh) {
- /* Launch self-heal on all local subvolumes if:
- * a) We have_heard_from_all for the first time
- * b) Already heard from everyone, but we now got a child-up
- * event.
- */
- if (have_heard_from_all && priv->shd.iamshd) {
- for (i = 0; i < priv->child_count; i++)
- if (priv->child_up[i])
- afr_selfheal_childup (this, i);
+ if (priv->last_event[i] == GF_EVENT_CHILD_CONNECTING) {
+ event = GF_EVENT_CHILD_CONNECTING;
+ /* continue to check other events for CHILD_UP */
}
+ }
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (priv->quorum_count) {
+ has_quorum = afr_has_quorum(priv->child_up, this, NULL);
+ if (!had_quorum && has_quorum) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_QUORUM_MET,
+ "Client-quorum is met");
+ gf_event(EVENT_AFR_QUORUM_MET, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ }
+ if (had_quorum && !has_quorum) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_QUORUM_FAIL,
+ "Client-quorum is not met");
+ gf_event(EVENT_AFR_QUORUM_FAIL, "client-pid=%d; subvol=%s",
+ this->ctx->cmd_args.client_pid, this->name);
+ }
+ }
+
+ /* if all subvols have reported status, no need to hide anything
+ or wait for anything else. Just propagate blindly */
+ if (have_heard_from_all)
+ propagate = 1;
+
+ ret = 0;
+ if (propagate)
+ ret = default_notify(this, event, data);
+
+ if ((!had_heard_from_all) || call_psh) {
+ /* Launch self-heal on all local subvolumes if:
+ * a) We have_heard_from_all for the first time
+ * b) Already heard from everyone, but we now got a child-up
+ * event.
+ */
+ if (have_heard_from_all) {
+ afr_selfheal_childup(this, priv);
}
+ }
out:
- return ret;
+ return ret;
}
-
int
-afr_local_init (afr_local_t *local, afr_private_t *priv, int32_t *op_errno)
-{
- local->op_ret = -1;
- local->op_errno = EUCLEAN;
-
- syncbarrier_init (&local->barrier);
-
- local->child_up = GF_CALLOC (priv->child_count,
- sizeof (*local->child_up),
- gf_afr_mt_char);
- if (!local->child_up) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
-
- memcpy (local->child_up, priv->child_up,
- sizeof (*local->child_up) * priv->child_count);
- local->call_count = AFR_COUNT (local->child_up, priv->child_count);
- if (local->call_count == 0) {
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- AFR_MSG_ALL_SUBVOLS_DOWN, "no subvolumes up");
- if (op_errno)
- *op_errno = ENOTCONN;
- goto out;
- }
- local->event_generation = priv->event_generation;
-
- local->read_attempted = GF_CALLOC (priv->child_count, sizeof (char),
- gf_afr_mt_char);
- if (!local->read_attempted) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
-
- local->readable = GF_CALLOC (priv->child_count, sizeof (char),
- gf_afr_mt_char);
- if (!local->readable) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
-
- local->replies = GF_CALLOC(priv->child_count, sizeof(*local->replies),
- gf_afr_mt_reply_t);
- if (!local->replies) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
- }
-
- INIT_LIST_HEAD (&local->healer);
- return 0;
+afr_local_init(afr_local_t *local, afr_private_t *priv, int32_t *op_errno)
+{
+ int __ret = -1;
+ local->op_ret = -1;
+ local->op_errno = EUCLEAN;
+
+ __ret = syncbarrier_init(&local->barrier);
+ if (__ret) {
+ if (op_errno)
+ *op_errno = __ret;
+ goto out;
+ }
+
+ local->child_up = GF_MALLOC(priv->child_count * sizeof(*local->child_up),
+ gf_afr_mt_char);
+ if (!local->child_up) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ memcpy(local->child_up, priv->child_up,
+ sizeof(*local->child_up) * priv->child_count);
+ local->call_count = AFR_COUNT(local->child_up, priv->child_count);
+ if (local->call_count == 0) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, AFR_MSG_SUBVOLS_DOWN,
+ "no subvolumes up");
+ if (op_errno)
+ *op_errno = ENOTCONN;
+ goto out;
+ }
+
+ local->event_generation = priv->event_generation;
+
+ local->read_attempted = GF_CALLOC(priv->child_count, sizeof(char),
+ gf_afr_mt_char);
+ if (!local->read_attempted) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->readable = GF_CALLOC(priv->child_count, sizeof(char),
+ gf_afr_mt_char);
+ if (!local->readable) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->readable2 = GF_CALLOC(priv->child_count, sizeof(char),
+ gf_afr_mt_char);
+ if (!local->readable2) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->read_subvol = -1;
+
+ local->replies = GF_CALLOC(priv->child_count, sizeof(*local->replies),
+ gf_afr_mt_reply_t);
+ if (!local->replies) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->need_full_crawl = _gf_false;
+ if (priv->thin_arbiter_count) {
+ local->ta_child_up = priv->ta_child_up;
+ local->ta_failed_subvol = AFR_CHILD_UNKNOWN;
+ local->read_txn_query_child = AFR_CHILD_UNKNOWN;
+ local->ta_event_gen = priv->ta_event_gen;
+ local->fop_state = TA_SUCCESS;
+ }
+ local->is_new_entry = _gf_false;
+
+ INIT_LIST_HEAD(&local->healer);
+ return 0;
out:
- return -1;
+ return -1;
}
int
-afr_internal_lock_init (afr_internal_lock_t *lk, size_t child_count,
- transaction_lk_type_t lk_type)
+afr_internal_lock_init(afr_internal_lock_t *lk, size_t child_count)
{
- int ret = -ENOMEM;
-
- lk->locked_nodes = GF_CALLOC (sizeof (*lk->locked_nodes),
- child_count, gf_afr_mt_char);
- if (NULL == lk->locked_nodes)
- goto out;
+ int ret = -ENOMEM;
- lk->lower_locked_nodes = GF_CALLOC (sizeof (*lk->lower_locked_nodes),
- child_count, gf_afr_mt_char);
- if (NULL == lk->lower_locked_nodes)
- goto out;
+ lk->lower_locked_nodes = GF_CALLOC(sizeof(*lk->lower_locked_nodes),
+ child_count, gf_afr_mt_char);
+ if (NULL == lk->lower_locked_nodes)
+ goto out;
- lk->lock_op_ret = -1;
- lk->lock_op_errno = EUCLEAN;
- lk->transaction_lk_type = lk_type;
+ lk->lock_op_ret = -1;
+ lk->lock_op_errno = EUCLEAN;
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
void
-afr_matrix_cleanup (int32_t **matrix, unsigned int m)
+afr_matrix_cleanup(int32_t **matrix, unsigned int m)
{
- int i = 0;
+ int i = 0;
- if (!matrix)
- goto out;
- for (i = 0; i < m; i++) {
- GF_FREE (matrix[i]);
- }
+ if (!matrix)
+ goto out;
+ for (i = 0; i < m; i++) {
+ GF_FREE(matrix[i]);
+ }
- GF_FREE (matrix);
+ GF_FREE(matrix);
out:
- return;
+ return;
}
-int32_t**
-afr_matrix_create (unsigned int m, unsigned int n)
+int32_t **
+afr_matrix_create(unsigned int m, unsigned int n)
{
- int32_t **matrix = NULL;
- int i = 0;
+ int32_t **matrix = NULL;
+ int i = 0;
- matrix = GF_CALLOC (sizeof (*matrix), m, gf_afr_mt_int32_t);
- if (!matrix)
- goto out;
+ matrix = GF_CALLOC(sizeof(*matrix), m, gf_afr_mt_int32_t);
+ if (!matrix)
+ goto out;
- for (i = 0; i < m; i++) {
- matrix[i] = GF_CALLOC (sizeof (*matrix[i]), n,
- gf_afr_mt_int32_t);
- if (!matrix[i])
- goto out;
- }
- return matrix;
+ for (i = 0; i < m; i++) {
+ matrix[i] = GF_CALLOC(sizeof(*matrix[i]), n, gf_afr_mt_int32_t);
+ if (!matrix[i])
+ goto out;
+ }
+ return matrix;
out:
- afr_matrix_cleanup (matrix, m);
- return NULL;
+ afr_matrix_cleanup(matrix, m);
+ return NULL;
}
int
-afr_inodelk_init (afr_inodelk_t *lk, char *dom, size_t child_count)
-{
- int ret = -ENOMEM;
-
- lk->domain = dom;
- lk->locked_nodes = GF_CALLOC (sizeof (*lk->locked_nodes),
- child_count, gf_afr_mt_char);
- if (NULL == lk->locked_nodes)
- goto out;
- ret = 0;
+afr_transaction_local_init(afr_local_t *local, xlator_t *this)
+{
+ int ret = -ENOMEM;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ INIT_LIST_HEAD(&local->transaction.wait_list);
+ INIT_LIST_HEAD(&local->transaction.owner_list);
+ INIT_LIST_HEAD(&local->ta_waitq);
+ INIT_LIST_HEAD(&local->ta_onwireq);
+ ret = afr_internal_lock_init(&local->internal_lock, priv->child_count);
+ if (ret < 0)
+ goto out;
+
+ ret = -ENOMEM;
+ local->pre_op_compat = priv->pre_op_compat;
+
+ local->transaction.pre_op = GF_CALLOC(sizeof(*local->transaction.pre_op),
+ priv->child_count, gf_afr_mt_char);
+ if (!local->transaction.pre_op)
+ goto out;
+
+ local->transaction.changelog_xdata = GF_CALLOC(
+ sizeof(*local->transaction.changelog_xdata), priv->child_count,
+ gf_afr_mt_dict_t);
+ if (!local->transaction.changelog_xdata)
+ goto out;
+
+ if (priv->arbiter_count == 1) {
+ local->transaction.pre_op_sources = GF_CALLOC(
+ sizeof(*local->transaction.pre_op_sources), priv->child_count,
+ gf_afr_mt_char);
+ if (!local->transaction.pre_op_sources)
+ goto out;
+ }
+
+ local->transaction.failed_subvols = GF_CALLOC(
+ sizeof(*local->transaction.failed_subvols), priv->child_count,
+ gf_afr_mt_char);
+ if (!local->transaction.failed_subvols)
+ goto out;
+
+ local->pending = afr_matrix_create(priv->child_count, AFR_NUM_CHANGE_LOGS);
+ if (!local->pending)
+ goto out;
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
-int
-afr_transaction_local_init (afr_local_t *local, xlator_t *this)
+void
+afr_set_low_priority(call_frame_t *frame)
{
- int child_up_count = 0;
- int ret = -ENOMEM;
- afr_private_t *priv = NULL;
+ frame->root->pid = LOW_PRIO_PROC_PID;
+}
- priv = this->private;
- ret = afr_internal_lock_init (&local->internal_lock, priv->child_count,
- AFR_TRANSACTION_LK);
- if (ret < 0)
- goto out;
+void
+afr_priv_destroy(afr_private_t *priv)
+{
+ int i = 0;
+ int child_count = -1;
+
+ if (!priv)
+ goto out;
+
+ GF_FREE(priv->sh_domain);
+ GF_FREE(priv->last_event);
+
+ child_count = priv->child_count;
+ if (priv->thin_arbiter_count) {
+ child_count++;
+ }
+ if (priv->pending_key) {
+ for (i = 0; i < child_count; i++)
+ GF_FREE(priv->pending_key[i]);
+ }
+
+ GF_FREE(priv->pending_reads);
+ GF_FREE(priv->local);
+ GF_FREE(priv->pending_key);
+ GF_FREE(priv->children);
+ GF_FREE(priv->anon_inode);
+ GF_FREE(priv->child_up);
+ GF_FREE(priv->halo_child_up);
+ GF_FREE(priv->child_latency);
+ LOCK_DESTROY(&priv->lock);
+
+ GF_FREE(priv);
+out:
+ return;
+}
- if ((local->transaction.type == AFR_DATA_TRANSACTION) ||
- (local->transaction.type == AFR_METADATA_TRANSACTION)) {
- ret = afr_inodelk_init (&local->internal_lock.inodelk[0],
- this->name, priv->child_count);
- if (ret < 0)
- goto out;
- }
+int **
+afr_mark_pending_changelog(afr_private_t *priv, unsigned char *pending,
+ dict_t *xattr, ia_type_t iat)
+{
+ int i = 0;
+ int **changelog = NULL;
+ int idx = -1;
+ int m_idx = 0;
+ int d_idx = 0;
+ int ret = 0;
- ret = -ENOMEM;
- child_up_count = AFR_COUNT (local->child_up, priv->child_count);
- if (priv->optimistic_change_log && child_up_count == priv->child_count)
- local->optimistic_change_log = 1;
+ m_idx = afr_index_for_transaction_type(AFR_METADATA_TRANSACTION);
+ d_idx = afr_index_for_transaction_type(AFR_DATA_TRANSACTION);
- local->pre_op_compat = priv->pre_op_compat;
+ idx = afr_index_from_ia_type(iat);
- local->transaction.eager_lock =
- GF_CALLOC (sizeof (*local->transaction.eager_lock),
- priv->child_count,
- gf_afr_mt_int32_t);
+ changelog = afr_matrix_create(priv->child_count, AFR_NUM_CHANGE_LOGS);
+ if (!changelog)
+ goto out;
- if (!local->transaction.eager_lock)
- goto out;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!pending[i])
+ continue;
- local->transaction.pre_op = GF_CALLOC (sizeof (*local->transaction.pre_op),
- priv->child_count,
- gf_afr_mt_char);
- if (!local->transaction.pre_op)
- goto out;
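+ /* Mark the metadata changelog, and the changelog matching the file
+ * type where one applies, in network byte order. */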
+ changelog[i][m_idx] = hton32(1);
+ if (idx != -1)
+ changelog[i][idx] = hton32(1);
+ /* If the newentry marking is on a newly created directory,
+ * then mark it with the full-heal indicator.
+ */
+ if ((IA_ISDIR(iat)) && (priv->esh_granular))
+ changelog[i][d_idx] = hton32(1);
+ }
+ ret = afr_set_pending_dict(priv, xattr, changelog);
+ if (ret < 0) {
+ afr_matrix_cleanup(changelog, priv->child_count);
+ return NULL;
+ }
+out:
+ return changelog;
+}
- if (priv->arbiter_count == 1) {
- local->transaction.pre_op_xdata =
- GF_CALLOC (sizeof (*local->transaction.pre_op_xdata),
- priv->child_count, gf_afr_mt_dict_t);
- if (!local->transaction.pre_op_xdata)
- goto out;
+static dict_t *
+afr_set_heal_info(char *status)
+{
+ dict_t *dict = NULL;
+ int ret = -1;
- local->transaction.pre_op_sources =
- GF_CALLOC (sizeof (*local->transaction.pre_op_sources),
- priv->child_count, gf_afr_mt_char);
- if (!local->transaction.pre_op_sources)
- goto out;
- }
+ dict = dict_new();
+ if (!dict) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = dict_set_dynstr_sizen(dict, "heal-info", status);
+ if (ret)
+ gf_msg("", GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Failed to set heal-info key to "
+ "%s",
+ status);
+out:
+ /* On any error other than EINVAL, dict_set_dynstr frees status. */
+ if (ret == -ENOMEM || ret == -EINVAL) {
+ GF_FREE(status);
+ }
- local->transaction.failed_subvols = GF_CALLOC (sizeof (*local->transaction.failed_subvols),
- priv->child_count,
- gf_afr_mt_char);
- if (!local->transaction.failed_subvols)
- goto out;
+ if (ret && dict) {
+ dict_unref(dict);
+ dict = NULL;
+ }
+ return dict;
+}
- local->pending = afr_matrix_create (priv->child_count,
- AFR_NUM_CHANGE_LOGS);
- if (!local->pending)
- goto out;
+static gf_boolean_t
+afr_is_dirty_count_non_unary_for_txn(xlator_t *this, struct afr_reply *replies,
+ afr_transaction_type type)
+{
+ afr_private_t *priv = this->private;
+ int *dirty = alloca0(priv->child_count * sizeof(int));
+ int i = 0;
- INIT_LIST_HEAD (&local->transaction.eager_locked);
+ afr_selfheal_extract_xattr(this, replies, type, dirty, NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (dirty[i] > 1)
+ return _gf_true;
+ }
- ret = 0;
-out:
- return ret;
+ return _gf_false;
}
+static gf_boolean_t
+afr_is_dirty_count_non_unary(xlator_t *this, struct afr_reply *replies,
+ ia_type_t ia_type)
+{
+ gf_boolean_t data_chk = _gf_false;
+ gf_boolean_t mdata_chk = _gf_false;
+ gf_boolean_t entry_chk = _gf_false;
+
+ switch (ia_type) {
+ case IA_IFDIR:
+ mdata_chk = _gf_true;
+ entry_chk = _gf_true;
+ break;
+ case IA_IFREG:
+ mdata_chk = _gf_true;
+ data_chk = _gf_true;
+ break;
+ default:
+ /*IA_IFBLK, IA_IFCHR, IA_IFLNK, IA_IFIFO, IA_IFSOCK*/
+ mdata_chk = _gf_true;
+ break;
+ }
-void
-afr_set_low_priority (call_frame_t *frame)
-{
- frame->root->pid = LOW_PRIO_PROC_PID;
+ if (data_chk && afr_is_dirty_count_non_unary_for_txn(
+ this, replies, AFR_DATA_TRANSACTION)) {
+ return _gf_true;
+ } else if (mdata_chk && afr_is_dirty_count_non_unary_for_txn(
+ this, replies, AFR_METADATA_TRANSACTION)) {
+ return _gf_true;
+ } else if (entry_chk && afr_is_dirty_count_non_unary_for_txn(
+ this, replies, AFR_ENTRY_TRANSACTION)) {
+ return _gf_true;
+ }
+
+ return _gf_false;
}
+static int
+afr_update_heal_status(xlator_t *this, struct afr_reply *replies,
+ ia_type_t ia_type, gf_boolean_t *esh, gf_boolean_t *dsh,
+ gf_boolean_t *msh, unsigned char pending)
+{
+ int ret = -1;
+ GF_UNUSED int ret1 = 0;
+ int i = 0;
+ int io_domain_lk_count = 0;
+ int shd_domain_lk_count = 0;
+ afr_private_t *priv = NULL;
+ char *key1 = NULL;
+ char *key2 = NULL;
+
+ priv = this->private;
+ key1 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
+ strlen(this->name));
+ key2 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
+ strlen(priv->sh_domain));
+ sprintf(key1, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, this->name);
+ sprintf(key2, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, priv->sh_domain);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if ((replies[i].valid != 1) || (replies[i].op_ret != 0))
+ continue;
+ if (!io_domain_lk_count) {
+ ret1 = dict_get_int32(replies[i].xdata, key1, &io_domain_lk_count);
+ }
+ if (!shd_domain_lk_count) {
+ ret1 = dict_get_int32(replies[i].xdata, key2, &shd_domain_lk_count);
+ }
+ }
+
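+ /* No pending xattrs: a dirty count greater than one, or the absence
+ * of any lock in the io domain, still means heal is needed. */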
+ if (!pending) {
+ if ((afr_is_dirty_count_non_unary(this, replies, ia_type)) ||
+ (!io_domain_lk_count)) {
+ /* Needs heal. */
+ ret = 0;
+ } else {
+ /* No heal needed. */
+ *dsh = *esh = *msh = 0;
+ }
+ } else {
+ if (shd_domain_lk_count) {
+ ret = -EAGAIN; /*For 'possibly-healing'. */
+ } else {
+ ret = 0; /* Needs heal. Set a non-negative value so that it is
+ treated as the source index. */
+ }
+ }
+ return ret;
+}
-gf_boolean_t
-afr_have_quorum (char *logname, afr_private_t *priv)
-{
- unsigned int quorum = 0;
+/*return EIO, EAGAIN or pending*/
+int
+afr_lockless_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+ inode_t **inode, gf_boolean_t *entry_selfheal,
+ gf_boolean_t *data_selfheal,
+ gf_boolean_t *metadata_selfheal, unsigned char *pending)
+{
+ int ret = -1;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ gf_boolean_t dsh = _gf_false;
+ gf_boolean_t msh = _gf_false;
+ gf_boolean_t esh = _gf_false;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ unsigned char *valid_on = NULL;
+ uint64_t *witness = NULL;
+
+ priv = this->private;
+ replies = alloca0(sizeof(*replies) * priv->child_count);
+ sources = alloca0(sizeof(*sources) * priv->child_count);
+ sinks = alloca0(sizeof(*sinks) * priv->child_count);
+ witness = alloca0(sizeof(*witness) * priv->child_count);
+ valid_on = alloca0(sizeof(*valid_on) * priv->child_count);
+
+ ret = afr_selfheal_unlocked_inspect(frame, this, gfid, inode, &dsh, &msh,
+ &esh, replies);
+ if (ret)
+ goto out;
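+ /* Consider only the bricks that returned a valid, successful reply. */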
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid && replies[i].op_ret == 0) {
+ valid_on[i] = 1;
+ }
+ }
+ if (msh) {
+ ret = afr_selfheal_find_direction(frame, this, replies,
+ AFR_METADATA_TRANSACTION, valid_on,
+ sources, sinks, witness, pending);
+ if (*pending & PFLAG_SBRAIN)
+ ret = -EIO;
+ if (ret)
+ goto out;
+ }
+ if (dsh) {
+ ret = afr_selfheal_find_direction(frame, this, replies,
+ AFR_DATA_TRANSACTION, valid_on,
+ sources, sinks, witness, pending);
+ if (*pending & PFLAG_SBRAIN)
+ ret = -EIO;
+ if (ret)
+ goto out;
+ }
+ if (esh) {
+ ret = afr_selfheal_find_direction(frame, this, replies,
+ AFR_ENTRY_TRANSACTION, valid_on,
+ sources, sinks, witness, pending);
+ if (*pending & PFLAG_SBRAIN)
+ ret = -EIO;
+ if (ret)
+ goto out;
+ }
- GF_VALIDATE_OR_GOTO(logname,priv,out);
+ ret = afr_update_heal_status(this, replies, (*inode)->ia_type, &esh, &dsh,
+ &msh, *pending);
+out:
+ *data_selfheal = dsh;
+ *entry_selfheal = esh;
+ *metadata_selfheal = msh;
+ if (replies)
+ afr_replies_wipe(replies, priv->child_count);
+ return ret;
+}
- quorum = priv->quorum_count;
- if (quorum != AFR_QUORUM_AUTO) {
- return (priv->up_count >= (priv->down_count + quorum));
+int
+afr_get_heal_info(call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ gf_boolean_t data_selfheal = _gf_false;
+ gf_boolean_t metadata_selfheal = _gf_false;
+ gf_boolean_t entry_selfheal = _gf_false;
+ unsigned char pending = 0;
+ dict_t *dict = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+ inode_t *inode = NULL;
+ char *substr = NULL;
+ char *status = NULL;
+ call_frame_t *heal_frame = NULL;
+ afr_local_t *heal_local = NULL;
+
+ /*Use frame with lk-owner set*/
+ heal_frame = afr_frame_create(frame->this, &op_errno);
+ if (!heal_frame) {
+ ret = -1;
+ goto out;
+ }
+ heal_local = heal_frame->local;
+ heal_frame->local = frame->local;
+
+ ret = afr_lockless_inspect(heal_frame, this, loc->gfid, &inode,
+ &entry_selfheal, &data_selfheal,
+ &metadata_selfheal, &pending);
+
+ if (ret == -ENOMEM) {
+ ret = -1;
+ goto out;
+ }
+
+ if (pending & PFLAG_PENDING) {
+ gf_asprintf(&substr, "-pending");
+ if (!substr)
+ goto out;
+ }
+
+ if (ret == -EIO) {
+ ret = gf_asprintf(&status, "split-brain%s", substr ? substr : "");
+ if (ret < 0) {
+ goto out;
}
-
- quorum = priv->child_count / 2 + 1;
- if (priv->up_count >= (priv->down_count + quorum)) {
- return _gf_true;
+ dict = afr_set_heal_info(status);
+ if (!dict) {
+ ret = -1;
+ goto out;
}
-
- /*
- * Special case for even numbers of nodes: if we have exactly half
- * and that includes the first ("senior-most") node, then that counts
- * as quorum even if it wouldn't otherwise. This supports e.g. N=2
- * while preserving the critical property that there can only be one
- * such group.
+ } else if (ret == -EAGAIN) {
+ ret = gf_asprintf(&status, "possibly-healing%s", substr ? substr : "");
+ if (ret < 0) {
+ goto out;
+ }
+ dict = afr_set_heal_info(status);
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+ } else if (ret >= 0) {
+ /* value of ret = source index
+ * so ret >= 0 and at least one of the 3 booleans set to
+ * true means a source is identified; heal is required.
*/
- if ((priv->child_count % 2) == 0) {
- quorum = priv->child_count / 2;
- if (priv->up_count >= (priv->down_count + quorum)) {
- if (priv->child_up[0]) {
- return _gf_true;
- }
- }
+ if (!data_selfheal && !entry_selfheal && !metadata_selfheal) {
+ status = gf_strdup("no-heal");
+ if (!status) {
+ ret = -1;
+ goto out;
+ }
+ dict = afr_set_heal_info(status);
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+ } else {
+ ret = gf_asprintf(&status, "heal%s", substr ? substr : "");
+ if (ret < 0) {
+ goto out;
+ }
+ dict = afr_set_heal_info(status);
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+ }
+ } else if (ret < 0) {
+ /* Apart from the -ve ret values checked above, there are
+ * other possible ret values like ENOTCONN (returned when
+ * the number of valid replies received is less than 2),
+ * in which case heal is required when one of the
+ * selfheal booleans is set.
+ */
+ if (data_selfheal || entry_selfheal || metadata_selfheal) {
+ ret = gf_asprintf(&status, "heal%s", substr ? substr : "");
+ if (ret < 0) {
+ goto out;
+ }
+ dict = afr_set_heal_info(status);
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
}
+ }
+
+ ret = 0;
+ op_errno = 0;
out:
- return _gf_false;
+ if (heal_frame) {
+ heal_frame->local = heal_local;
+ AFR_STACK_DESTROY(heal_frame);
+ }
+ AFR_STACK_UNWIND(getxattr, frame, ret, op_errno, dict, NULL);
+ if (dict)
+ dict_unref(dict);
+ if (inode)
+ inode_unref(inode);
+ GF_FREE(substr);
+ return ret;
}
-void
-afr_priv_destroy (afr_private_t *priv)
-{
- int i = 0;
+int
+_afr_is_split_brain(call_frame_t *frame, xlator_t *this,
+ struct afr_reply *replies, afr_transaction_type type,
+ gf_boolean_t *spb)
+{
+ afr_private_t *priv = NULL;
+ uint64_t *witness = NULL;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ int sources_count = 0;
+ int ret = 0;
+
+ priv = this->private;
+
+ sources = alloca0(priv->child_count);
+ sinks = alloca0(priv->child_count);
+ witness = alloca0(priv->child_count * sizeof(*witness));
+
+ ret = afr_selfheal_find_direction(frame, this, replies, type,
+ priv->child_up, sources, sinks, witness,
+ NULL);
+ if (ret)
+ return ret;
- if (!priv)
- goto out;
- GF_FREE (priv->last_event);
- if (priv->pending_key) {
- for (i = 0; i < priv->child_count; i++)
- GF_FREE (priv->pending_key[i]);
- }
- GF_FREE (priv->pending_key);
- GF_FREE (priv->children);
- GF_FREE (priv->child_up);
- LOCK_DESTROY (&priv->lock);
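+ /* If no brick qualifies as a source, the file is in split-brain for
+ * this transaction type. */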
+ sources_count = AFR_COUNT(sources, priv->child_count);
+ if (!sources_count)
+ *spb = _gf_true;
- GF_FREE (priv);
-out:
- return;
+ return ret;
}
-void
-afr_handle_open_fd_count (call_frame_t *frame, xlator_t *this)
+int
+afr_is_split_brain(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ uuid_t gfid, gf_boolean_t *d_spb, gf_boolean_t *m_spb)
{
- afr_local_t *local = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
+ int ret = -1;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
- local = frame->local;
+ priv = this->private;
+
+ replies = alloca0(sizeof(*replies) * priv->child_count);
- if (!local->fd)
- return;
+ ret = afr_selfheal_unlocked_discover(frame, inode, gfid, replies);
+ if (ret)
+ goto out;
- fd_ctx = afr_fd_ctx_get (local->fd, this);
- if (!fd_ctx)
- return;
+ if (!afr_can_decide_split_brain_source_sinks(replies, priv->child_count)) {
+ ret = -EAGAIN;
+ goto out;
+ }
- fd_ctx->open_fd_count = local->open_fd_count;
+ ret = _afr_is_split_brain(frame, this, replies, AFR_DATA_TRANSACTION,
+ d_spb);
+ if (ret)
+ goto out;
+
+ ret = _afr_is_split_brain(frame, this, replies, AFR_METADATA_TRANSACTION,
+ m_spb);
+out:
+ if (replies) {
+ afr_replies_wipe(replies, priv->child_count);
+ replies = NULL;
+ }
+ return ret;
}
-int**
-afr_mark_pending_changelog (afr_private_t *priv, unsigned char *pending,
- dict_t *xattr, ia_type_t iat)
+int
+afr_get_split_brain_status_cbk(int ret, call_frame_t *frame, void *opaque)
{
- int i = 0;
- int **changelog = NULL;
- int idx = -1;
- int m_idx = 0;
- int ret = 0;
+ GF_FREE(opaque);
+ return 0;
+}
- m_idx = afr_index_for_transaction_type (AFR_METADATA_TRANSACTION);
+int
+afr_get_split_brain_status(void *opaque)
+{
+ gf_boolean_t d_spb = _gf_false;
+ gf_boolean_t m_spb = _gf_false;
+ int ret = -1;
+ int op_errno = 0;
+ int i = 0;
+ char *choices = NULL;
+ char *status = NULL;
+ dict_t *dict = NULL;
+ inode_t *inode = NULL;
+ afr_private_t *priv = NULL;
+ xlator_t **children = NULL;
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ loc_t *loc = NULL;
+ afr_spb_status_t *data = NULL;
+
+ data = opaque;
+ frame = data->frame;
+ this = frame->this;
+ loc = data->loc;
+ priv = this->private;
+ children = priv->children;
+
+ inode = afr_inode_find(this, loc->gfid);
+ if (!inode)
+ goto out;
+
+ dict = dict_new();
+ if (!dict) {
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+
+ /* Calculation for string length :
+ * (child_count X length of child-name) + SLEN(" Choices :")
+ * child-name consists of :
+ * a) 251 = max characters for volname according to GD_VOLUME_NAME_MAX
+ * b) strlen("-client-00,") assuming 16 replicas
+ */
+ choices = alloca0(priv->child_count * (256 + SLEN("-client-00,")) +
+ SLEN(" Choices:"));
+
+ ret = afr_is_split_brain(frame, this, inode, loc->gfid, &d_spb, &m_spb);
+ if (ret) {
+ op_errno = -ret;
+ if (ret == -EAGAIN) {
+ ret = dict_set_sizen_str_sizen(dict, GF_AFR_SBRAIN_STATUS,
+ SBRAIN_HEAL_NO_GO_MSG);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret,
+ AFR_MSG_DICT_SET_FAILED,
+ "Failed to set GF_AFR_SBRAIN_STATUS in dict");
+ }
+ }
+ ret = -1;
+ goto out;
+ }
+
+ if (d_spb || m_spb) {
+ sprintf(choices, " Choices:");
+ for (i = 0; i < priv->child_count; i++) {
+ strcat(choices, children[i]->name);
+ strcat(choices, ",");
+ }
+ choices[strlen(choices) - 1] = '\0';
- idx = afr_index_from_ia_type (iat);
+ ret = gf_asprintf(&status,
+ "data-split-brain:%s "
+ "metadata-split-brain:%s%s",
+ (d_spb) ? "yes" : "no", (m_spb) ? "yes" : "no",
+ choices);
- changelog = afr_matrix_create (priv->child_count, AFR_NUM_CHANGE_LOGS);
- if (!changelog)
- goto out;
+ if (-1 == ret) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+ ret = dict_set_dynstr_sizen(dict, GF_AFR_SBRAIN_STATUS, status);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+ } else {
+ ret = dict_set_sizen_str_sizen(dict, GF_AFR_SBRAIN_STATUS,
+ SFILE_NOT_UNDER_DATA);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (!pending[i])
- continue;
-
- changelog[i][m_idx] = hton32(1);
- if (idx != -1)
- changelog[i][idx] = hton32(1);
- }
- ret = afr_set_pending_dict (priv, xattr, changelog);
- if (ret < 0) {
- afr_matrix_cleanup (changelog, priv->child_count);
- return NULL;
- }
+ ret = 0;
out:
- return changelog;
+ AFR_STACK_UNWIND(getxattr, frame, ret, op_errno, dict, NULL);
+ if (dict)
+ dict_unref(dict);
+ if (inode)
+ inode_unref(inode);
+ return ret;
}
-gf_boolean_t
-afr_decide_heal_info (afr_private_t *priv, unsigned char *sources, int ret)
+int32_t
+afr_heal_splitbrain_file(call_frame_t *frame, xlator_t *this, loc_t *loc)
{
- int sources_count = 0;
-
+ int ret = 0;
+ int op_errno = 0;
+ dict_t *dict = NULL;
+ afr_local_t *local = NULL;
+ afr_local_t *heal_local = NULL;
+ call_frame_t *heal_frame = NULL;
+
+ local = frame->local;
+ dict = dict_new();
+ if (!dict) {
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+
+ heal_frame = afr_frame_create(this, &op_errno);
+ if (!heal_frame) {
+ ret = -1;
+ goto out;
+ }
+ heal_local = heal_frame->local;
+ heal_frame->local = frame->local;
+ /* Initiate heal using heal_frame, which has its lk-owner set, so that
+ * inodelk/entrylk work correctly. */
+ ret = afr_selfheal_do(heal_frame, this, loc->gfid);
+
+ if (ret == 1 || ret == 2) {
+ ret = dict_set_sizen_str_sizen(dict, "sh-fail-msg",
+ SFILE_NOT_IN_SPLIT_BRAIN);
if (ret)
- goto out;
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Failed to set sh-fail-msg in dict");
+ ret = 0;
+ goto out;
+ } else {
+ if (local->xdata_rsp) {
+ /* 'sh-fail-msg' has been set in the dict during self-heal.*/
+ dict_copy(local->xdata_rsp, dict);
+ ret = 0;
+ } else if (ret < 0) {
+ op_errno = -ret;
+ ret = -1;
+ }
+ }
- sources_count = AFR_COUNT (sources, priv->child_count);
- if (sources_count == priv->child_count)
- return _gf_false;
out:
- return _gf_true;
+ if (heal_frame) {
+ heal_frame->local = heal_local;
+ AFR_STACK_DESTROY(heal_frame);
+ }
+ if (local->op == GF_FOP_GETXATTR)
+ AFR_STACK_UNWIND(getxattr, frame, ret, op_errno, dict, NULL);
+ else if (local->op == GF_FOP_SETXATTR)
+ AFR_STACK_UNWIND(setxattr, frame, ret, op_errno, NULL);
+ if (dict)
+ dict_unref(dict);
+ return ret;
}
int
-afr_selfheal_locked_metadata_inspect (call_frame_t *frame, xlator_t *this,
- inode_t *inode, gf_boolean_t *msh,
- gf_boolean_t *pending)
+afr_get_child_index_from_name(xlator_t *this, char *name)
{
- int ret = -1;
- unsigned char *locked_on = NULL;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *healed_sinks = NULL;
- struct afr_reply *locked_replies = NULL;
+ afr_private_t *priv = this->private;
+ int index = -1;
- afr_private_t *priv = this->private;
-
- locked_on = alloca0 (priv->child_count);
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
-
- locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count);
-
- ret = afr_selfheal_inodelk (frame, this, inode, this->name,
- LLONG_MAX - 1, 0, locked_on);
- {
- if (ret == 0) {
- /* Not a single lock */
- ret = -afr_final_errno (frame->local, priv);
- if (ret == 0)
- ret = -ENOTCONN;/* all invalid responses */
- goto out;
- }
- ret = __afr_selfheal_metadata_prepare (frame, this, inode,
- locked_on, sources,
- sinks, healed_sinks,
- locked_replies,
- pending);
- *msh = afr_decide_heal_info (priv, sources, ret);
- }
- afr_selfheal_uninodelk (frame, this, inode, this->name,
- LLONG_MAX - 1, 0, locked_on);
+ for (index = 0; index < priv->child_count; index++) {
+ if (!strcmp(priv->children[index]->name, name))
+ goto out;
+ }
+ index = -1;
out:
- if (locked_replies)
- afr_replies_wipe (locked_replies, priv->child_count);
- return ret;
+ return index;
}
-int
-afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,
- inode_t *inode, gf_boolean_t *dsh,
- gf_boolean_t *pflag)
-{
- int ret = -1;
- unsigned char *locked_on = NULL;
- unsigned char *data_lock = NULL;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *healed_sinks = NULL;
- afr_private_t *priv = NULL;
- fd_t *fd = NULL;
- struct afr_reply *locked_replies = NULL;
-
- priv = this->private;
- locked_on = alloca0 (priv->child_count);
- data_lock = alloca0 (priv->child_count);
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
-
- /* Heal-info does an open() on the file being examined so that the
- * current eager-lock holding client, if present, at some point sees
- * open-fd count being > 1 and releases the eager-lock so that heal-info
- * doesn't remain blocked forever until IO completes.
- */
- ret = afr_selfheal_data_open (this, inode, &fd);
- if (ret < 0) {
- gf_msg_debug (this->name, -ret, "%s: Failed to open",
- uuid_utoa (inode->gfid));
- goto out;
- }
+void
+afr_priv_need_heal_set(afr_private_t *priv, gf_boolean_t need_heal)
+{
+ LOCK(&priv->lock);
+ {
+ priv->need_heal = need_heal;
+ }
+ UNLOCK(&priv->lock);
+}
- locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count);
+void
+afr_set_need_heal(xlator_t *this, afr_local_t *local)
+{
+ int i = 0;
+ afr_private_t *priv = this->private;
+ gf_boolean_t need_heal = _gf_false;
- ret = afr_selfheal_tryinodelk (frame, this, inode, priv->sh_domain,
- 0, 0, locked_on);
- {
- if (ret == 0) {
- ret = -afr_final_errno (frame->local, priv);
- if (ret == 0)
- ret = -ENOTCONN;/* all invalid responses */
- goto out;
- }
- ret = afr_selfheal_inodelk (frame, this, inode, this->name,
- 0, 0, data_lock);
- {
- if (ret == 0) {
- ret = -afr_final_errno (frame->local, priv);
- if (ret == 0)
- ret = -ENOTCONN;
- /* all invalid responses */
- goto unlock;
- }
- ret = __afr_selfheal_data_prepare (frame, this, inode,
- data_lock, sources,
- sinks, healed_sinks,
- locked_replies,
- pflag);
- *dsh = afr_decide_heal_info (priv, sources, ret);
- }
- afr_selfheal_uninodelk (frame, this, inode, this->name, 0, 0,
- data_lock);
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].valid && local->replies[i].need_heal) {
+ need_heal = _gf_true;
+ break;
}
-unlock:
- afr_selfheal_uninodelk (frame, this, inode, priv->sh_domain, 0, 0,
- locked_on);
-out:
- if (locked_replies)
- afr_replies_wipe (locked_replies, priv->child_count);
- if (fd)
- fd_unref (fd);
- return ret;
+ }
+ afr_priv_need_heal_set(priv, need_heal);
+ return;
}
-int
-afr_selfheal_locked_entry_inspect (call_frame_t *frame, xlator_t *this,
- inode_t *inode,
- gf_boolean_t *esh, gf_boolean_t *pflag)
-{
- int ret = -1;
- int source = -1;
- afr_private_t *priv = NULL;
- unsigned char *locked_on = NULL;
- unsigned char *data_lock = NULL;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *healed_sinks = NULL;
- struct afr_reply *locked_replies = NULL;
-
- priv = this->private;
- locked_on = alloca0 (priv->child_count);
- data_lock = alloca0 (priv->child_count);
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
-
- locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count);
-
- ret = afr_selfheal_tryentrylk (frame, this, inode, priv->sh_domain,
- NULL, locked_on);
- {
- if (ret == 0) {
- ret = -afr_final_errno (frame->local, priv);
- if (ret == 0)
- ret = -ENOTCONN;/* all invalid responses */
- goto out;
- }
+gf_boolean_t
+afr_get_need_heal(xlator_t *this)
+{
+ afr_private_t *priv = this->private;
+ gf_boolean_t need_heal = _gf_true;
- ret = afr_selfheal_entrylk (frame, this, inode, this->name,
- NULL, data_lock);
- {
- if (ret == 0) {
- ret = -afr_final_errno (frame->local, priv);
- if (ret == 0)
- ret = -ENOTCONN;
- /* all invalid responses */
- goto unlock;
- }
- ret = __afr_selfheal_entry_prepare (frame, this, inode,
- data_lock, sources,
- sinks, healed_sinks,
- locked_replies,
- &source, pflag);
- if ((ret == 0) && source < 0)
- ret = -EIO;
- *esh = afr_decide_heal_info (priv, sources, ret);
- }
- afr_selfheal_unentrylk (frame, this, inode, this->name, NULL,
- data_lock);
- }
-unlock:
- afr_selfheal_unentrylk (frame, this, inode, priv->sh_domain, NULL,
- locked_on);
-out:
- if (locked_replies)
- afr_replies_wipe (locked_replies, priv->child_count);
- return ret;
+ LOCK(&priv->lock);
+ {
+ need_heal = priv->need_heal;
+ }
+ UNLOCK(&priv->lock);
+ return need_heal;
}
int
-afr_selfheal_locked_inspect (call_frame_t *frame, xlator_t *this, uuid_t gfid,
- inode_t **inode,
- gf_boolean_t *entry_selfheal,
- gf_boolean_t *data_selfheal,
- gf_boolean_t *metadata_selfheal,
- gf_boolean_t *pending)
+afr_get_msg_id(char *op_type)
+{
+ if (!strcmp(op_type, GF_AFR_REPLACE_BRICK))
+ return AFR_MSG_REPLACE_BRICK_STATUS;
+ else if (!strcmp(op_type, GF_AFR_ADD_BRICK))
+ return AFR_MSG_ADD_BRICK_STATUS;
+ return -1;
+}
+int
+afr_fav_child_reset_sink_xattrs_cbk(int ret, call_frame_t *heal_frame,
+ void *opaque)
{
- int ret = -1;
- gf_boolean_t dsh = _gf_false;
- gf_boolean_t msh = _gf_false;
- gf_boolean_t esh = _gf_false;
+ call_frame_t *txn_frame = NULL;
+ afr_local_t *local = NULL;
+ afr_local_t *heal_local = NULL;
+ xlator_t *this = NULL;
- ret = afr_selfheal_unlocked_inspect (frame, this, gfid, inode,
- &dsh, &msh, &esh);
- if (ret)
- goto out;
+ heal_local = heal_frame->local;
+ txn_frame = heal_local->heal_frame;
+ local = txn_frame->local;
+ this = txn_frame->this;
- /* For every heal type hold locks and check if it indeed needs heal */
+ /* Refresh the inode again and proceed with the transaction. */
+ afr_inode_refresh(txn_frame, this, local->inode, NULL, local->refreshfn);
- if (msh) {
- ret = afr_selfheal_locked_metadata_inspect (frame, this,
- *inode, &msh,
- pending);
- if (ret == -EIO)
- goto out;
- }
+ AFR_STACK_DESTROY(heal_frame);
- if (dsh) {
- ret = afr_selfheal_locked_data_inspect (frame, this, *inode,
- &dsh, pending);
- if (ret == -EIO || (ret == -EAGAIN))
- goto out;
- }
+ return 0;
+}
- if (esh) {
- ret = afr_selfheal_locked_entry_inspect (frame, this, *inode,
- &esh, pending);
+int
+afr_fav_child_reset_sink_xattrs(void *opaque)
+{
+ call_frame_t *heal_frame = NULL;
+ call_frame_t *txn_frame = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t d_spb = _gf_false;
+ gf_boolean_t m_spb = _gf_false;
+ afr_local_t *heal_local = NULL;
+ afr_local_t *txn_local = NULL;
+ afr_private_t *priv = NULL;
+ inode_t *inode = NULL;
+ unsigned char *locked_on = NULL;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ unsigned char *healed_sinks = NULL;
+ unsigned char *undid_pending = NULL;
+ struct afr_reply *locked_replies = NULL;
+ int ret = 0;
+
+ heal_frame = (call_frame_t *)opaque;
+ heal_local = heal_frame->local;
+ txn_frame = heal_local->heal_frame;
+ txn_local = txn_frame->local;
+ this = txn_frame->this;
+ inode = txn_local->inode;
+ priv = this->private;
+ locked_on = alloca0(priv->child_count);
+ sources = alloca0(priv->child_count);
+ sinks = alloca0(priv->child_count);
+ healed_sinks = alloca0(priv->child_count);
+ undid_pending = alloca0(priv->child_count);
+ locked_replies = alloca0(sizeof(*locked_replies) * priv->child_count);
+
+ ret = _afr_is_split_brain(txn_frame, this, txn_local->replies,
+ AFR_DATA_TRANSACTION, &d_spb);
+
+ ret = _afr_is_split_brain(txn_frame, this, txn_local->replies,
+ AFR_METADATA_TRANSACTION, &m_spb);
+
+ /* Take appropriate locks and reset sink xattrs. */
+ if (d_spb) {
+ ret = afr_selfheal_inodelk(heal_frame, this, inode, this->name, 0, 0,
+ locked_on);
+ {
+ if (ret < priv->child_count)
+ goto data_unlock;
+ ret = __afr_selfheal_data_prepare(
+ heal_frame, this, inode, locked_on, sources, sinks,
+ healed_sinks, undid_pending, locked_replies, NULL);
+ }
+ data_unlock:
+ afr_selfheal_uninodelk(heal_frame, this, inode, this->name, 0, 0,
+ locked_on);
+ }
+
+ if (m_spb) {
+ memset(locked_on, 0, sizeof(*locked_on) * priv->child_count);
+ memset(undid_pending, 0, sizeof(*undid_pending) * priv->child_count);
+ ret = afr_selfheal_inodelk(heal_frame, this, inode, this->name,
+ LLONG_MAX - 1, 0, locked_on);
+ {
+ if (ret < priv->child_count)
+ goto mdata_unlock;
+ ret = __afr_selfheal_metadata_prepare(
+ heal_frame, this, inode, locked_on, sources, sinks,
+ healed_sinks, undid_pending, locked_replies, NULL);
}
+ mdata_unlock:
+ afr_selfheal_uninodelk(heal_frame, this, inode, this->name,
+ LLONG_MAX - 1, 0, locked_on);
+ }
-out:
- *data_selfheal = dsh;
- *entry_selfheal = esh;
- *metadata_selfheal = msh;
- return ret;
+ return ret;
}
-dict_t*
-afr_set_heal_info (char *status)
-{
- dict_t *dict = NULL;
- int ret = -1;
-
- dict = dict_new ();
- if (!dict) {
- ret = -ENOMEM;
+/*
+ * Concatenates the xattrs in local->replies separated by a delimiter.
+ */
+int
+afr_serialize_xattrs_with_delimiter(call_frame_t *frame, xlator_t *this,
+ char *buf, const char *default_str,
+ int32_t *serz_len, char delimiter)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ char *xattr = NULL;
+ int i = 0;
+ int len = 0;
+ int keylen = 0;
+ size_t str_len = 0;
+ int ret = -1;
+
+ priv = this->private;
+ local = frame->local;
+
+ keylen = strlen(local->cont.getxattr.name);
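+ /* For each brick, append either its xattr value or the default
+ * string, followed by the delimiter. */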
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid || local->replies[i].op_ret) {
+ str_len = strlen(default_str);
+ buf = strncat(buf, default_str, str_len);
+ len += str_len;
+ buf[len++] = delimiter;
+ buf[len] = '\0';
+ } else {
+ ret = dict_get_strn(local->replies[i].xattr,
+ local->cont.getxattr.name, keylen, &xattr);
+ if (ret) {
+ gf_msg("TEST", GF_LOG_ERROR, -ret, AFR_MSG_DICT_GET_FAILED,
+ "Failed to get the node_uuid of brick "
+ "%d",
+ i);
goto out;
- }
+ }
+ str_len = strlen(xattr);
+ buf = strncat(buf, xattr, str_len);
+ len += str_len;
+ buf[len++] = delimiter;
+ buf[len] = '\0';
+ }
+ }
+ buf[--len] = '\0'; /*remove the last delimiter*/
+ if (serz_len)
+ *serz_len = ++len;
+ ret = 0;
- ret = dict_set_str (dict, "heal-info", status);
- if (ret)
- gf_msg ("", GF_LOG_WARNING, -ret,
- AFR_MSG_DICT_SET_FAILED,
- "Failed to set heal-info key to "
- "%s", status);
out:
- return dict;
+ return ret;
}
-int
-afr_get_heal_info (call_frame_t *frame, xlator_t *this, loc_t *loc)
-{
- gf_boolean_t data_selfheal = _gf_false;
- gf_boolean_t metadata_selfheal = _gf_false;
- gf_boolean_t entry_selfheal = _gf_false;
- gf_boolean_t pending = _gf_false;
- dict_t *dict = NULL;
- int ret = -1;
- int op_errno = 0;
- int size = 0;
- inode_t *inode = NULL;
- char *substr = NULL;
- char *status = NULL;
-
- ret = afr_selfheal_locked_inspect (frame, this, loc->gfid, &inode,
- &entry_selfheal,
- &data_selfheal, &metadata_selfheal,
- &pending);
-
- if (ret == -ENOMEM) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
-
- if (pending) {
- size = strlen ("-pending") + 1;
- gf_asprintf (&substr, "-pending");
- if (!substr)
- goto out;
- }
+uint64_t
+afr_write_subvol_get(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ uint64_t write_subvol = 0;
- if (ret == -EIO) {
- size += strlen ("split-brain") + 1;
- ret = gf_asprintf (&status, "split-brain%s",
- substr? substr : "");
- if (ret < 0)
- goto out;
- dict = afr_set_heal_info (status);
- } else if (ret == -EAGAIN) {
- size += strlen ("possibly-healing") + 1;
- ret = gf_asprintf (&status, "possibly-healing%s",
- substr? substr : "");
- if (ret < 0)
- goto out;
- dict = afr_set_heal_info (status);
- } else if (ret >= 0) {
- /* value of ret = source index
- * so ret >= 0 and at least one of the 3 booleans set to
- * true means a source is identified; heal is required.
- */
- if (!data_selfheal && !entry_selfheal &&
- !metadata_selfheal) {
- dict = afr_set_heal_info ("no-heal");
- } else {
- size += strlen ("heal") + 1;
- ret = gf_asprintf (&status, "heal%s",
- substr? substr : "");
- if (ret < 0)
- goto out;
- dict = afr_set_heal_info (status);
- }
- } else if (ret < 0) {
- /* Apart from above checked -ve ret values, there are
- * other possible ret values like ENOTCONN
- * (returned when number of valid replies received are
- * less than 2)
- * in which case heal is required when one of the
- * selfheal booleans is set.
- */
- if (data_selfheal || entry_selfheal ||
- metadata_selfheal) {
- size += strlen ("heal") + 1;
- ret = gf_asprintf (&status, "heal%s",
- substr? substr : "");
- if (ret < 0)
- goto out;
- dict = afr_set_heal_info (status);
- }
- }
- ret = 0;
+ local = frame->local;
+ LOCK(&local->inode->lock);
+ write_subvol = local->inode_ctx->write_subvol;
+ UNLOCK(&local->inode->lock);
-out:
- AFR_STACK_UNWIND (getxattr, frame, ret, op_errno, dict, NULL);
- if (dict)
- dict_unref (dict);
- if (inode)
- inode_unref (inode);
- GF_FREE (substr);
- return ret;
+ return write_subvol;
}
int
-_afr_is_split_brain (call_frame_t *frame, xlator_t *this,
- struct afr_reply *replies,
- afr_transaction_type type,
- gf_boolean_t *spb)
-{
- afr_private_t *priv = NULL;
- uint64_t *witness = NULL;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- int sources_count = 0;
- int ret = 0;
-
- priv = this->private;
-
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- witness = alloca0(priv->child_count * sizeof (*witness));
-
- ret = afr_selfheal_find_direction (frame, this, replies,
- type, priv->child_up, sources,
- sinks, witness, NULL);
- if (ret)
- return ret;
-
- sources_count = AFR_COUNT (sources, priv->child_count);
- if (!sources_count)
- *spb = _gf_true;
-
- return ret;
+afr_write_subvol_set(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ unsigned char *data_accused = NULL;
+ unsigned char *metadata_accused = NULL;
+ unsigned char *data_readable = NULL;
+ unsigned char *metadata_readable = NULL;
+ uint16_t datamap = 0;
+ uint16_t metadatamap = 0;
+ uint64_t val = 0;
+ int event = 0;
+ int i = 0;
+
+ local = frame->local;
+ priv = this->private;
+ data_accused = alloca0(priv->child_count);
+ metadata_accused = alloca0(priv->child_count);
+ data_readable = alloca0(priv->child_count);
+ metadata_readable = alloca0(priv->child_count);
+ event = local->event_generation;
+
+ afr_readables_fill(frame, this, local->inode, data_accused,
+ metadata_accused, data_readable, metadata_readable,
+ NULL);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (data_readable[i])
+ datamap |= (1 << i);
+ if (metadata_readable[i])
+ metadatamap |= (1 << i);
+ }
+
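+ /* Pack the metadata-readable map (bits 0-15), the data-readable map
+ * (bits 16-31) and the event generation (upper 32 bits) into a
+ * single inode-ctx value. */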
+ val = ((uint64_t)metadatamap) | (((uint64_t)datamap) << 16) |
+ (((uint64_t)event) << 32);
+
+ LOCK(&local->inode->lock);
+ {
+ if (local->inode_ctx->write_subvol == 0 &&
+ local->transaction.type == AFR_DATA_TRANSACTION) {
+ local->inode_ctx->write_subvol = val;
+ }
+ }
+ UNLOCK(&local->inode->lock);
+
+ return 0;
}
int
-afr_is_split_brain (call_frame_t *frame, xlator_t *this, inode_t *inode,
- uuid_t gfid, gf_boolean_t *d_spb, gf_boolean_t *m_spb)
+afr_write_subvol_reset(call_frame_t *frame, xlator_t *this)
{
- int ret = -1;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
+ afr_local_t *local = NULL;
- priv = this->private;
+ local = frame->local;
+ LOCK(&local->inode->lock);
+ {
+ GF_ASSERT(local->inode_ctx->lock_count > 0);
+ local->inode_ctx->lock_count--;
- replies = alloca0 (sizeof (*replies) * priv->child_count);
+ if (!local->inode_ctx->lock_count)
+ local->inode_ctx->write_subvol = 0;
+ }
+ UNLOCK(&local->inode->lock);
- ret = afr_selfheal_unlocked_discover (frame, inode, gfid, replies);
- if (ret)
- goto out;
-
- ret = _afr_is_split_brain (frame, this, replies,
- AFR_DATA_TRANSACTION, d_spb);
- if (ret)
- goto out;
-
- ret = _afr_is_split_brain (frame, this, replies,
- AFR_METADATA_TRANSACTION, m_spb);
-out:
- if (replies) {
- afr_replies_wipe (replies, priv->child_count);
- replies = NULL;
- }
- return ret;
+ return 0;
}
int
-afr_get_split_brain_status_cbk (int ret, call_frame_t *frame, void *opaque)
+afr_set_inode_local(xlator_t *this, afr_local_t *local, inode_t *inode)
{
- GF_FREE (opaque);
- return 0;
-}
+ int ret = 0;
-int
-afr_get_split_brain_status (void *opaque)
-{
- gf_boolean_t d_spb = _gf_false;
- gf_boolean_t m_spb = _gf_false;
- int ret = -1;
- int op_errno = 0;
- int i = 0;
- char *choices = NULL;
- char *status = NULL;
- dict_t *dict = NULL;
- inode_t *inode = NULL;
- afr_private_t *priv = NULL;
- xlator_t **children = NULL;
- call_frame_t *frame = NULL;
- xlator_t *this = NULL;
- loc_t *loc = NULL;
- afr_spb_status_t *data = NULL;
-
- data = opaque;
- frame = data->frame;
- this = frame->this;
- loc = data->loc;
- priv = this->private;
- children = priv->children;
-
- inode = afr_inode_find (this, loc->gfid);
- if (!inode)
- goto out;
+ local->inode = inode_ref(inode);
+ LOCK(&local->inode->lock);
+ {
+ ret = __afr_inode_ctx_get(this, local->inode, &local->inode_ctx);
+ }
+ UNLOCK(&local->inode->lock);
+ if (ret < 0) {
+ gf_msg_callingfn(
+ this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_INODE_CTX_GET_FAILED,
+ "Error getting inode ctx %s", uuid_utoa(local->inode->gfid));
+ }
+ return ret;
+}
- /* Calculation for string length :
- * (child_count X length of child-name) + strlen (" Choices :")
- * child-name consists of :
- * a) 256 = max characters for volname according to GD_VOLUME_NAME_MAX
- * b) strlen ("-client-00,") assuming 16 replicas
- */
- choices = alloca0 (priv->child_count * (256 + strlen ("-client-00,")) +
- strlen (" Choices:"));
+gf_boolean_t
+afr_ta_is_fop_called_from_synctask(xlator_t *this)
+{
+ struct synctask *task = NULL;
+ gf_lkowner_t tmp_owner = {
+ 0,
+ };
- ret = afr_is_split_brain (frame, this, inode, loc->gfid, &d_spb,
- &m_spb);
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
+ task = synctask_get();
+ if (!task)
+ return _gf_false;
- dict = dict_new ();
- if (!dict) {
- op_errno = ENOMEM;
- ret = -1;
- goto out;
- }
+ set_lk_owner_from_ptr(&tmp_owner, (void *)this);
- if (d_spb || m_spb) {
- sprintf (choices, " Choices:");
- for (i = 0; i < priv->child_count; i++) {
- strcat (choices, children[i]->name);
- strcat (choices, ",");
- }
- choices[strlen (choices) - 1] = '\0';
+ if (!is_same_lkowner(&tmp_owner, &task->frame->root->lk_owner))
+ return _gf_false;
- ret = gf_asprintf (&status, "data-split-brain:%s "
- "metadata-split-brain:%s%s",
- (d_spb) ? "yes" : "no",
- (m_spb) ? "yes" : "no", choices);
+ return _gf_true;
+}
- if (-1 == ret) {
- op_errno = ENOMEM;
- goto out;
- }
- ret = dict_set_dynstr (dict, GF_AFR_SBRAIN_STATUS, status);
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
+int
+afr_ta_post_op_lock(xlator_t *this, loc_t *loc)
+{
+ int ret = 0;
+ uuid_t gfid = {
+ 0,
+ };
+ afr_private_t *priv = this->private;
+ gf_boolean_t locked = _gf_false;
+ struct gf_flock flock1 = {
+ 0,
+ };
+ struct gf_flock flock2 = {
+ 0,
+ };
+ int32_t cmd = 0;
+
+ /* Clients must take the AFR_TA_DOM_NOTIFY lock only after the previous lock
+ * has been released in afr_notify due to an upcall notification from the shd.
+ */
+ GF_ASSERT(priv->ta_notify_dom_lock_offset == 0);
+
+ if (!priv->shd.iamshd)
+ GF_ASSERT(afr_ta_is_fop_called_from_synctask(this));
+ flock1.l_type = F_WRLCK;
+
+ while (!locked) {
+ if (priv->shd.iamshd) {
+ cmd = F_SETLKW;
+ flock1.l_start = 0;
+ flock1.l_len = 0;
} else {
- ret = dict_set_str (dict, GF_AFR_SBRAIN_STATUS,
- "The file is not under data or"
- " metadata split-brain");
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
- }
+ cmd = F_SETLK;
+ gf_uuid_generate(gfid);
+ flock1.l_start = gfid_to_ino(gfid);
+ if (flock1.l_start < 0)
+ flock1.l_start = -flock1.l_start;
+ flock1.l_len = 1;
+ }
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_NOTIFY, loc, cmd, &flock1, NULL, NULL);
+ if (!ret) {
+ locked = _gf_true;
+ priv->ta_notify_dom_lock_offset = flock1.l_start;
+ } else if (ret == -EAGAIN) {
+ continue;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to get "
+ "AFR_TA_DOM_NOTIFY lock on %s.",
+ loc->name);
+ goto out;
+ }
+ }
+
+ flock2.l_type = F_WRLCK;
+ flock2.l_start = 0;
+ flock2.l_len = 0;
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_MODIFY, loc, F_SETLKW, &flock2, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to get AFR_TA_DOM_MODIFY lock on %s.", loc->name);
+ flock1.l_type = F_UNLCK;
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_NOTIFY, loc, F_SETLK, &flock1, NULL,
+ NULL);
+ }
+out:
+ return ret;
+}
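
The locking pattern above differs between the self-heal daemon and regular clients: the shd takes a blocking full-range lock, while clients take a non-blocking one-byte lock at a pseudo-random offset derived from a freshly generated gfid and simply retry with a new offset on EAGAIN. The following standalone sketch restates only that retry loop; `try_notify_lock` is a stand-in for the `syncop_inodelk` call on the thin-arbiter brick, not a real API:

```
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the inodelk call on the thin-arbiter brick: returns 0 on
 * success, -EAGAIN when the chosen offset happens to be held already. */
static int
try_notify_lock(int64_t offset)
{
    static int attempts = 0;
    (void)offset; /* a real call would pass this as the lock's l_start */
    return (++attempts < 3) ? -EAGAIN : 0; /* succeed on the third try */
}

int
main(void)
{
    int locked = 0;
    int64_t offset = 0;
    int ret = 0;

    /* Clients pick a fresh pseudo-random offset per attempt and use a
     * non-blocking request, so two clients never wait on each other. */
    while (!locked) {
        offset = labs(random());
        ret = try_notify_lock(offset);
        if (ret == 0)
            locked = 1;
        else if (ret == -EAGAIN)
            continue; /* offset collision: retry with a new offset */
        else
            return 1; /* hard failure */
    }
    printf("notify-domain lock held at offset %lld\n", (long long)offset);
    return 0;
}
```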
- ret = 0;
+int
+afr_ta_post_op_unlock(xlator_t *this, loc_t *loc)
+{
+ afr_private_t *priv = this->private;
+ struct gf_flock flock = {
+ 0,
+ };
+ int ret = 0;
+
+ if (!priv->shd.iamshd)
+ GF_ASSERT(afr_ta_is_fop_called_from_synctask(this));
+ flock.l_type = F_UNLCK;
+ flock.l_start = 0;
+ flock.l_len = 0;
+
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_MODIFY, loc, F_SETLK, &flock, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to unlock AFR_TA_DOM_MODIFY lock.");
+ goto out;
+ }
+
+ if (!priv->shd.iamshd)
+ /* Mounts (clients) will not release the AFR_TA_DOM_NOTIFY lock
+ * in post-op as they use it as a notification mechanism. When
+ * shd sends a lock request on TA during heal, the clients will
+ * receive a lock-contention upcall notification upon which they
+ * will release the AFR_TA_DOM_NOTIFY lock after completing the
+ * in-flight I/O. */
+ goto out;
+
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_NOTIFY, loc, F_SETLK, &flock, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to unlock AFR_TA_DOM_NOTIFY lock.");
+ }
out:
- AFR_STACK_UNWIND (getxattr, frame, ret, op_errno, dict, NULL);
- if (dict)
- dict_unref (dict);
- if (inode)
- inode_unref (inode);
- return ret;
+ return ret;
}
-int32_t
-afr_heal_splitbrain_file(call_frame_t *frame, xlator_t *this, loc_t *loc)
+call_frame_t *
+afr_ta_frame_create(xlator_t *this)
{
- int ret = 0;
- int op_errno = 0;
- dict_t *dict = NULL;
- afr_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ void *lk_owner = NULL;
- local = frame->local;
- dict = dict_new ();
- if (!dict) {
- op_errno = ENOMEM;
- ret = -1;
- goto out;
- }
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame)
+ return NULL;
+ lk_owner = (void *)this;
+ afr_set_lk_owner(frame, this, lk_owner);
+ return frame;
+}
- ret = afr_selfheal_do (frame, this, loc->gfid);
+gf_boolean_t
+afr_ta_has_quorum(afr_private_t *priv, afr_local_t *local)
+{
+ int data_count = 0;
- if (ret == 1 || ret == 2) {
- ret = dict_set_str (dict, "sh-fail-msg",
- "File not in split-brain");
- if (ret)
- gf_msg (this->name, GF_LOG_WARNING,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Failed to set sh-fail-msg in dict");
- ret = 0;
- goto out;
- } else {
- if (local->xdata_rsp) {
- /* 'sh-fail-msg' has been set in the dict during self-heal.*/
- dict_copy (local->xdata_rsp, dict);
- ret = 0;
- } else if (ret < 0) {
- op_errno = -ret;
- ret = -1;
- }
- }
+ data_count = AFR_COUNT(local->child_up, priv->child_count);
+ if (data_count == 2) {
+ return _gf_true;
+ } else if (data_count == 1 && local->ta_child_up) {
+ return _gf_true;
+ }
-out:
- if (local->op == GF_FOP_GETXATTR)
- AFR_STACK_UNWIND (getxattr, frame, ret, op_errno, dict, NULL);
- else if (local->op == GF_FOP_SETXATTR)
- AFR_STACK_UNWIND (setxattr, frame, ret, op_errno, NULL);
- if (dict)
- dict_unref(dict);
- return ret;
+ return _gf_false;
}
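
The quorum rule for a thin-arbiter volume, as checked above, reduces to: both data bricks up, or one data brick plus the thin-arbiter brick. A tiny illustrative restatement (no AFR types involved):

```
#include <stdio.h>

/* Quorum rule for a 2-data-brick + thin-arbiter volume, mirroring the
 * checks above: both data bricks up, or one data brick plus the TA. */
static int
ta_has_quorum(int data_bricks_up, int ta_up)
{
    if (data_bricks_up == 2)
        return 1;
    if (data_bricks_up == 1 && ta_up)
        return 1;
    return 0;
}

int
main(void)
{
    printf("2 data up, TA down -> %d\n", ta_has_quorum(2, 0)); /* quorum */
    printf("1 data up, TA up   -> %d\n", ta_has_quorum(1, 1)); /* quorum */
    printf("1 data up, TA down -> %d\n", ta_has_quorum(1, 0)); /* no quorum */
    return 0;
}
```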
-int
-afr_get_child_index_from_name (xlator_t *this, char *name)
+static gf_boolean_t
+afr_is_add_replica_mount_lookup_on_root(call_frame_t *frame)
{
- afr_private_t *priv = this->private;
- int index = -1;
+ afr_local_t *local = NULL;
- for (index = 0; index < priv->child_count; index++) {
- if (!strcmp (priv->children[index]->name, name))
- goto out;
- }
- index = -1;
-out:
- return index;
+ if (frame->root->pid != GF_CLIENT_PID_ADD_REPLICA_MOUNT)
+ return _gf_false;
+
+ local = frame->local;
+
+ if (local->op != GF_FOP_LOOKUP)
+ /* TODO: If the replica count is being increased on a plain distribute
+ * volume that was never mounted, we need to allow setxattr on '/' with
+ * GF_CLIENT_PID_NO_ROOT_SQUASH to accommodate DHT layout setting */
+ return _gf_false;
+
+ if (local->inode == NULL)
+ return _gf_false;
+
+ if (!__is_root_gfid(local->inode->gfid))
+ return _gf_false;
+
+ return _gf_true;
}
-void
-afr_priv_need_heal_set (afr_private_t *priv, gf_boolean_t need_heal)
+gf_boolean_t
+afr_lookup_has_quorum(call_frame_t *frame, const unsigned int up_children_count)
{
- LOCK (&priv->lock);
- {
- priv->need_heal = need_heal;
- }
- UNLOCK (&priv->lock);
+ if (frame && (up_children_count > 0) &&
+ afr_is_add_replica_mount_lookup_on_root(frame))
+ return _gf_true;
+
+ return _gf_false;
}
void
-afr_set_need_heal (xlator_t *this, afr_local_t *local)
+afr_handle_replies_quorum(call_frame_t *frame, xlator_t *this)
{
- int i = 0;
- afr_private_t *priv = this->private;
- gf_boolean_t need_heal = _gf_false;
+ afr_local_t *local = frame->local;
+ afr_private_t *priv = this->private;
+ unsigned char *success_replies = NULL;
- for (i = 0; i < priv->child_count; i++) {
- if (local->replies[i].valid && local->replies[i].need_heal) {
- need_heal = _gf_true;
- break;
- }
- }
- afr_priv_need_heal_set (priv, need_heal);
- return;
+ success_replies = alloca0(priv->child_count);
+ afr_fill_success_replies(local, priv, success_replies);
+
+ if (priv->quorum_count && !afr_has_quorum(success_replies, this, NULL)) {
+ local->op_errno = afr_final_errno(local, priv);
+ if (!local->op_errno)
+ local->op_errno = afr_quorum_errno(priv);
+ local->op_ret = -1;
+ }
}
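
The helper above fails the whole fop when fewer bricks than `quorum_count` replied successfully, preferring an errno collected from the replies and falling back to the quorum errno otherwise. A simplified, self-contained sketch of that decision (the helper name and the use of ENOTCONN as the fallback errno are assumptions of this sketch, not a statement about `afr_quorum_errno`):

```
#include <errno.h>
#include <stdio.h>

/* Count successful replies and fail the fop when they fall short of the
 * configured quorum, choosing a representative errno for the unwind. */
static int
check_reply_quorum(const int *op_rets, int child_count, int quorum_count,
                   int *op_errno)
{
    int success = 0;
    int i;

    for (i = 0; i < child_count; i++)
        if (op_rets[i] >= 0)
            success++;

    if (success >= quorum_count)
        return 0;

    if (*op_errno == 0)
        *op_errno = ENOTCONN; /* placeholder for the quorum errno */
    return -1;
}

int
main(void)
{
    int replies[3] = {0, -1, -1}; /* only one brick succeeded */
    int op_errno = 0;
    int ret = check_reply_quorum(replies, 3, 2, &op_errno);

    printf("ret=%d op_errno=%d\n", ret, op_errno);
    return 0;
}
```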
gf_boolean_t
-afr_get_need_heal (xlator_t *this)
-{
- afr_private_t *priv = this->private;
- gf_boolean_t need_heal = _gf_true;
-
- LOCK (&priv->lock);
- {
- need_heal = priv->need_heal;
+afr_ta_dict_contains_pending_xattr(dict_t *dict, afr_private_t *priv, int child)
+{
+ int *pending = NULL;
+ int ret = 0;
+ int i = 0;
+
+ ret = dict_get_ptr(dict, priv->pending_key[child], (void *)&pending);
+ if (ret == 0) {
+ for (i = 0; i < AFR_NUM_CHANGE_LOGS; i++) {
+ /* Not doing a ntoh32(pending) as we just want to check
+ * if it is non-zero or not. */
+ if (pending[i]) {
+ return _gf_true;
+ }
}
- UNLOCK (&priv->lock);
- return need_heal;
-}
+ }
-int
-afr_get_msg_id (char *op_type)
-{
-
- if (!strcmp (op_type, GF_AFR_REPLACE_BRICK))
- return AFR_MSG_REPLACE_BRICK_STATUS;
- else if (!strcmp (op_type, GF_AFR_ADD_BRICK))
- return AFR_MSG_ADD_BRICK_STATUS;
- return -1;
+ return _gf_false;
}
diff --git a/xlators/cluster/afr/src/afr-dir-read.c b/xlators/cluster/afr/src/afr-dir-read.c
index 841c64361cf..f8bf8340dab 100644
--- a/xlators/cluster/afr/src/afr-dir-read.c
+++ b/xlators/cluster/afr/src/afr-dir-read.c
@@ -8,344 +8,339 @@
cases as published by the Free Software Foundation.
*/
-
#include <libgen.h>
#include <unistd.h>
-#include <fnmatch.h>
#include <sys/time.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
-#include "glusterfs.h"
-#include "dict.h"
-#include "xlator.h"
-#include "hashfn.h"
-#include "logging.h"
-#include "stack.h"
-#include "list.h"
-#include "call-stub.h"
-#include "defaults.h"
-#include "common-utils.h"
-#include "compat-errno.h"
-#include "compat.h"
-#include "checksum.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/list.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/compat.h>
#include "afr.h"
#include "afr-transaction.h"
-
int32_t
-afr_opendir_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- fd_t *fd, dict_t *xdata)
+afr_opendir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int call_count = -1;
- int32_t child_index = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
-
- local = frame->local;
- fd_ctx = local->fd_ctx;
- child_index = (long) cookie;
-
- LOCK (&frame->lock);
- {
- if (op_ret == -1) {
- local->op_errno = op_errno;
- fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
- } else {
- local->op_ret = op_ret;
- fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
+ afr_local_t *local = NULL;
+ int call_count = -1;
+ int32_t child_index = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ local = frame->local;
+ fd_ctx = local->fd_ctx;
+ child_index = (long)cookie;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+
+ LOCK(&frame->lock);
+ {
+ if (op_ret == -1) {
+ local->op_errno = op_errno;
+ fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
+ } else {
+ local->op_ret = op_ret;
+ fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
}
- UNLOCK (&frame->lock);
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
- call_count = afr_frame_return (frame);
+ if (call_count == 0) {
+ afr_handle_replies_quorum(frame, this);
+ AFR_STACK_UNWIND(opendir, frame, local->op_ret, local->op_errno,
+ local->fd, NULL);
+ }
- if (call_count == 0)
- AFR_STACK_UNWIND (opendir, frame, local->op_ret,
- local->op_errno, local->fd, NULL);
- return 0;
+ return 0;
}
-
int
-afr_opendir (call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd)
+afr_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata)
{
- afr_private_t * priv = NULL;
- afr_local_t * local = NULL;
- int i = 0;
- int call_count = -1;
- int32_t op_errno = ENOMEM;
- afr_fd_ctx_t *fd_ctx = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+ int call_count = -1;
+ int32_t op_errno = ENOMEM;
+ afr_fd_ctx_t *fd_ctx = NULL;
- priv = this->private;
+ priv = this->private;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- goto out;
+ local->op = GF_FOP_OPENDIR;
- loc_copy (&local->loc, loc);
+ if (priv->quorum_count && !afr_has_quorum(local->child_up, this, NULL)) {
+ op_errno = afr_quorum_errno(priv);
+ goto out;
+ }
- local->fd = fd_ref (fd);
- local->fd_ctx = fd_ctx;
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
- call_count = local->call_count;
+ fd_ctx = afr_fd_ctx_get(fd, this);
+ if (!fd_ctx)
+ goto out;
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_opendir_cbk,
- (void*) (long) i,
- priv->children[i],
- priv->children[i]->fops->opendir,
- loc, fd, NULL);
+ loc_copy(&local->loc, loc);
- if (!--call_count)
- break;
- }
+ local->fd = fd_ref(fd);
+ local->fd_ctx = fd_ctx;
+
+ call_count = local->call_count;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(frame, afr_opendir_cbk, (void *)(long)i,
+ priv->children[i],
+ priv->children[i]->fops->opendir, loc, fd, NULL);
+
+ if (!--call_count)
+ break;
}
+ }
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (opendir, frame, -1, op_errno, fd, NULL);
- return 0;
+ AFR_STACK_UNWIND(opendir, frame, -1, op_errno, fd, NULL);
+ return 0;
}
static int
-afr_validate_read_subvol (inode_t *inode, xlator_t *this, int par_read_subvol)
+afr_validate_read_subvol(inode_t *inode, xlator_t *this, int par_read_subvol)
{
- int gen = 0;
- int entry_read_subvol = 0;
- unsigned char *data_readable = NULL;
- unsigned char *metadata_readable = NULL;
- afr_private_t *priv = NULL;
-
- priv = this->private;
- data_readable = alloca0 (priv->child_count);
- metadata_readable = alloca0 (priv->child_count);
-
- afr_inode_read_subvol_get (inode, this, data_readable,
- metadata_readable, &gen);
-
- if (gen != priv->event_generation ||
- !data_readable[par_read_subvol] ||
- !metadata_readable[par_read_subvol])
- return -1;
-
- /* Once the control reaches the following statement, it means that the
- * parent's read subvol is perfectly readable. So calling
- * either afr_data_subvol_get() or afr_metadata_subvol_get() would
- * yield the same result. Hence, choosing afr_data_subvol_get() below.
- */
-
- if (!priv->consistent_metadata)
- return 0;
-
- /* For an inode fetched through readdirp which is yet to be linked,
- * inode ctx would not be initialised (yet). So this function returns
- * -1 above due to gen being 0, which is why it is OK to pass NULL for
- * read_subvol_args here.
- */
- entry_read_subvol = afr_data_subvol_get (inode, this, 0, 0, NULL);
- if (entry_read_subvol != par_read_subvol)
- return -1;
-
+ int gen = 0;
+ int entry_read_subvol = 0;
+ unsigned char *data_readable = NULL;
+ unsigned char *metadata_readable = NULL;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ data_readable = alloca0(priv->child_count);
+ metadata_readable = alloca0(priv->child_count);
+
+ afr_inode_read_subvol_get(inode, this, data_readable, metadata_readable,
+ &gen);
+
+ if (gen != priv->event_generation || !data_readable[par_read_subvol] ||
+ !metadata_readable[par_read_subvol])
+ return -1;
+
+ /* Once the control reaches the following statement, it means that the
+ * parent's read subvol is perfectly readable. So calling
+ * either afr_data_subvol_get() or afr_metadata_subvol_get() would
+ * yield the same result. Hence, choosing afr_data_subvol_get() below.
+ */
+
+ if (!priv->consistent_metadata)
return 0;
+ /* For an inode fetched through readdirp which is yet to be linked,
+ * inode ctx would not be initialised (yet). So this function returns
+ * -1 above due to gen being 0, which is why it is OK to pass NULL for
+ * read_subvol_args here.
+ */
+ entry_read_subvol = afr_data_subvol_get(inode, this, NULL, NULL, NULL,
+ NULL);
+ if (entry_read_subvol != par_read_subvol)
+ return -1;
+
+ return 0;
}
static void
-afr_readdir_transform_entries (gf_dirent_t *subvol_entries, int subvol,
- gf_dirent_t *entries, fd_t *fd)
+afr_readdir_transform_entries(call_frame_t *frame, gf_dirent_t *subvol_entries,
+ int subvol, gf_dirent_t *entries, fd_t *fd)
{
- int ret = -1;
- gf_dirent_t *entry = NULL;
- gf_dirent_t *tmp = NULL;
- xlator_t *this = NULL;
- afr_private_t *priv = NULL;
- gf_boolean_t need_heal = _gf_false;
- gf_boolean_t validate_subvol = _gf_false;
-
- this = THIS;
- priv = this->private;
-
- need_heal = afr_get_need_heal (this);
- validate_subvol = need_heal | priv->consistent_metadata;
-
- list_for_each_entry_safe (entry, tmp, &subvol_entries->list, list) {
- if (__is_root_gfid (fd->inode->gfid) &&
- !strcmp (entry->d_name, GF_REPLICATE_TRASH_DIR)) {
- continue;
- }
-
- list_del_init (&entry->list);
- list_add_tail (&entry->list, &entries->list);
-
- if (!validate_subvol)
- continue;
-
- if (entry->inode) {
- ret = afr_validate_read_subvol (entry->inode, this,
- subvol);
- if (ret == -1) {
- inode_unref (entry->inode);
- entry->inode = NULL;
- continue;
- }
- }
+ int ret = -1;
+ gf_dirent_t *entry = NULL;
+ gf_dirent_t *tmp = NULL;
+ xlator_t *this = NULL;
+ afr_private_t *priv = NULL;
+ gf_boolean_t need_heal = _gf_false;
+ gf_boolean_t validate_subvol = _gf_false;
+
+ this = THIS;
+ priv = this->private;
+
+ need_heal = afr_get_need_heal(this);
+ validate_subvol = need_heal | priv->consistent_metadata;
+
+ list_for_each_entry_safe(entry, tmp, &subvol_entries->list, list)
+ {
+ if (afr_is_private_directory(priv, fd->inode->gfid, entry->d_name,
+ frame->root->pid)) {
+ continue;
}
-}
+ list_del_init(&entry->list);
+ list_add_tail(&entry->list, &entries->list);
+
+ if (!validate_subvol)
+ continue;
+
+ if (entry->inode) {
+ ret = afr_validate_read_subvol(entry->inode, this, subvol);
+ if (ret == -1) {
+ inode_unref(entry->inode);
+ entry->inode = NULL;
+ continue;
+ }
+ }
+ }
+}
int32_t
-afr_readdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, gf_dirent_t *subvol_entries,
- dict_t *xdata)
+afr_readdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *subvol_entries,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- gf_dirent_t entries;
+ afr_local_t *local = NULL;
+ gf_dirent_t entries;
- INIT_LIST_HEAD (&entries.list);
+ INIT_LIST_HEAD(&entries.list);
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0 && !local->cont.readdir.offset) {
- /* failover only if this was first readdir, detected
- by offset == 0 */
- local->op_ret = op_ret;
- local->op_errno = op_errno;
+ if (op_ret < 0 && !local->cont.readdir.offset) {
+ /* fail over only if this was the first readdir, detected
+ by offset == 0 */
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- if (op_ret >= 0)
- afr_readdir_transform_entries (subvol_entries, (long) cookie,
- &entries, local->fd);
+ if (op_ret >= 0)
+ afr_readdir_transform_entries(frame, subvol_entries, (long)cookie,
+ &entries, local->fd);
- AFR_STACK_UNWIND (readdir, frame, op_ret, op_errno, &entries, xdata);
+ AFR_STACK_UNWIND(readdir, frame, op_ret, op_errno, &entries, xdata);
- gf_dirent_free (&entries);
+ gf_dirent_free(&entries);
- return 0;
+ return 0;
}
-
int
-afr_readdir_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_readdir_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
-
- priv = this->private;
- local = frame->local;
- fd_ctx = afr_fd_ctx_get (local->fd, this);
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (readdir, frame, local->op_ret,
- local->op_errno, 0, 0);
- return 0;
- }
-
- fd_ctx->readdir_subvol = subvol;
-
- if (local->op == GF_FOP_READDIR)
- STACK_WIND_COOKIE (frame, afr_readdir_cbk,
- (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->readdir,
- local->fd, local->cont.readdir.size,
- local->cont.readdir.offset,
- local->xdata_req);
- else
- STACK_WIND_COOKIE (frame, afr_readdir_cbk,
- (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->readdirp,
- local->fd, local->cont.readdir.size,
- local->cont.readdir.offset,
- local->xdata_req);
- return 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ priv = this->private;
+ local = frame->local;
+ fd_ctx = afr_fd_ctx_get(local->fd, this);
+ if (!fd_ctx) {
+ local->op_errno = EINVAL;
+ local->op_ret = -1;
+ }
+
+ if (subvol == -1 || !fd_ctx) {
+ AFR_STACK_UNWIND(readdir, frame, local->op_ret, local->op_errno, 0, 0);
+ return 0;
+ }
+
+ fd_ctx->readdir_subvol = subvol;
+
+ if (local->op == GF_FOP_READDIR)
+ STACK_WIND_COOKIE(frame, afr_readdir_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->readdir, local->fd,
+ local->cont.readdir.size, local->cont.readdir.offset,
+ local->xdata_req);
+ else
+ STACK_WIND_COOKIE(frame, afr_readdir_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->readdirp, local->fd,
+ local->cont.readdir.size, local->cont.readdir.offset,
+ local->xdata_req);
+ return 0;
}
-
int
-afr_do_readdir (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
- off_t offset, int whichop, dict_t *dict)
+afr_do_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, int whichop, dict_t *dict)
{
- afr_local_t *local = NULL;
- int32_t op_errno = 0;
- int subvol = -1;
- afr_fd_ctx_t *fd_ctx = NULL;
-
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx) {
- op_errno = EINVAL;
- goto out;
- }
-
- local->op = whichop;
- local->fd = fd_ref (fd);
- local->cont.readdir.size = size;
- local->cont.readdir.offset = offset;
- local->xdata_req = (dict)? dict_ref (dict) : NULL;
-
- subvol = fd_ctx->readdir_subvol;
-
- if (offset == 0 || subvol == -1) {
- /* First readdir has option of failing over and selecting
- an appropriate read subvolume */
- afr_read_txn (frame, this, fd->inode, afr_readdir_wind,
- AFR_DATA_TRANSACTION);
- } else {
- /* But continued readdirs MUST stick to the same subvolume
- without an option to failover */
- afr_readdir_wind (frame, this, subvol);
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ int32_t op_errno = 0;
+ int subvol = -1;
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ fd_ctx = afr_fd_ctx_get(fd, this);
+ if (!fd_ctx) {
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ local->op = whichop;
+ local->fd = fd_ref(fd);
+ local->cont.readdir.size = size;
+ local->cont.readdir.offset = offset;
+ local->xdata_req = (dict) ? dict_ref(dict) : NULL;
+
+ subvol = fd_ctx->readdir_subvol;
+
+ if (offset == 0 || subvol == -1) {
+ /* The first readdir has the option of failing over and selecting
+ an appropriate read subvolume */
+ afr_read_txn(frame, this, fd->inode, afr_readdir_wind,
+ AFR_DATA_TRANSACTION);
+ } else {
+ /* But continued readdirs MUST stick to the same subvolume
+ without an option to fail over */
+ afr_readdir_wind(frame, this, subvol);
+ }
+
+ return 0;
out:
- AFR_STACK_UNWIND (readdir, frame, -1, op_errno, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(readdir, frame, -1, op_errno, NULL, NULL);
+ return 0;
}
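
The subvolume choice made in `afr_do_readdir` above is what keeps directory offsets consistent: only the very first readdir (offset 0, or no cached subvolume yet) may pick or fail over to a read subvolume; every continuation must reuse the subvolume cached in the fd context. A minimal sketch of that choice, with `fresh_subvol` standing in for whatever `afr_read_txn` would select:

```
#include <stdio.h>

/* Only the first readdir is allowed to pick/fail over to a read
 * subvolume; later calls must reuse the one cached in the fd context
 * so that directory offsets remain valid on the same brick. */
static int
choose_readdir_subvol(long offset, int cached_subvol, int fresh_subvol)
{
    if (offset == 0 || cached_subvol == -1)
        return fresh_subvol; /* stand-in for afr_read_txn() selection */
    return cached_subvol;    /* continued readdir: no failover allowed */
}

int
main(void)
{
    printf("first readdir     -> subvol %d\n",
           choose_readdir_subvol(0, -1, 1));
    printf("continued readdir -> subvol %d\n",
           choose_readdir_subvol(4096, 1, 0));
    return 0;
}
```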
-
int32_t
-afr_readdir (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
- off_t offset, dict_t *xdata)
+afr_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *xdata)
{
- afr_do_readdir (frame, this, fd, size, offset, GF_FOP_READDIR, xdata);
+ afr_do_readdir(frame, this, fd, size, offset, GF_FOP_READDIR, xdata);
- return 0;
+ return 0;
}
-
int32_t
-afr_readdirp (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
- off_t offset, dict_t *dict)
+afr_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *dict)
{
- afr_do_readdir (frame, this, fd, size, offset, GF_FOP_READDIRP, dict);
+ afr_do_readdir(frame, this, fd, size, offset, GF_FOP_READDIRP, dict);
- return 0;
+ return 0;
}
-
int32_t
-afr_releasedir (xlator_t *this, fd_t *fd)
+afr_releasedir(xlator_t *this, fd_t *fd)
{
- afr_cleanup_fd_ctx (this, fd);
+ afr_cleanup_fd_ctx(this, fd);
- return 0;
+ return 0;
}
diff --git a/xlators/cluster/afr/src/afr-dir-read.h b/xlators/cluster/afr/src/afr-dir-read.h
index 09456d15949..773e925ec6c 100644
--- a/xlators/cluster/afr/src/afr-dir-read.h
+++ b/xlators/cluster/afr/src/afr-dir-read.h
@@ -11,26 +11,23 @@
#ifndef __DIR_READ_H__
#define __DIR_READ_H__
-
int32_t
-afr_opendir (call_frame_t *frame, xlator_t *this,
- loc_t *loc, fd_t *fd, dict_t *xdata);
+afr_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata);
int32_t
-afr_releasedir (xlator_t *this, fd_t *fd);
+afr_releasedir(xlator_t *this, fd_t *fd);
int32_t
-afr_readdir (call_frame_t *frame, xlator_t *this,
- fd_t *fd, size_t size, off_t offset, dict_t *xdata);
-
+afr_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *xdata);
int32_t
-afr_readdirp (call_frame_t *frame, xlator_t *this,
- fd_t *fd, size_t size, off_t offset, dict_t *dict);
+afr_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *dict);
int32_t
-afr_checksum (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int32_t flags, dict_t *xdata);
-
+afr_checksum(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ dict_t *xdata);
#endif /* __DIR_READ_H__ */
diff --git a/xlators/cluster/afr/src/afr-dir-write.c b/xlators/cluster/afr/src/afr-dir-write.c
index b6b81d737b0..b7cceb79158 100644
--- a/xlators/cluster/afr/src/afr-dir-write.c
+++ b/xlators/cluster/afr/src/afr-dir-write.c
@@ -8,516 +8,493 @@
cases as published by the Free Software Foundation.
*/
-
#include <libgen.h>
#include <unistd.h>
-#include <fnmatch.h>
#include <sys/time.h>
#include <stdlib.h>
#include <signal.h>
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "afr.h"
-#include "dict.h"
-#include "xlator.h"
-#include "hashfn.h"
-#include "logging.h"
-#include "stack.h"
-#include "list.h"
-#include "call-stub.h"
-#include "defaults.h"
-#include "common-utils.h"
-#include "compat-errno.h"
-#include "compat.h"
-#include "byte-order.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/list.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/byte-order.h>
#include "afr.h"
#include "afr-transaction.h"
void
-afr_mark_entry_pending_changelog (call_frame_t *frame, xlator_t *this);
+afr_mark_entry_pending_changelog(call_frame_t *frame, xlator_t *this);
int
-afr_build_parent_loc (loc_t *parent, loc_t *child, int32_t *op_errno)
+afr_build_parent_loc(loc_t *parent, loc_t *child, int32_t *op_errno)
{
- int ret = -1;
- char *child_path = NULL;
+ int ret = -1;
+ char *child_path = NULL;
+
+ if (!child->parent) {
+ if (op_errno)
+ *op_errno = EINVAL;
+ goto out;
+ }
+
+ child_path = gf_strdup(child->path);
+ if (!child_path) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ parent->path = gf_strdup(dirname(child_path));
+ if (!parent->path) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ parent->inode = inode_ref(child->parent);
+ gf_uuid_copy(parent->gfid, child->pargfid);
+
+ ret = 0;
+out:
+ GF_FREE(child_path);
- if (!child->parent) {
- if (op_errno)
- *op_errno = EINVAL;
- goto out;
- }
+ return ret;
+}
- child_path = gf_strdup (child->path);
- if (!child_path) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
+static void
+__afr_dir_write_finalize(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int inode_read_subvol = -1;
+ int parent_read_subvol = -1;
+ int parent2_read_subvol = -1;
+ int i = 0;
+ afr_read_subvol_args_t args = {
+ 0,
+ };
+
+ local = frame->local;
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret == -1)
+ continue;
+ gf_uuid_copy(args.gfid, local->replies[i].poststat.ia_gfid);
+ args.ia_type = local->replies[i].poststat.ia_type;
+ break;
+ }
+
+ if (local->inode) {
+ if (local->op != GF_FOP_RENAME && local->op != GF_FOP_LINK)
+ afr_replies_interpret(frame, this, local->inode, NULL);
+
+ inode_read_subvol = afr_data_subvol_get(local->inode, this, NULL, NULL,
+ NULL, &args);
+ }
+
+ if (local->parent)
+ parent_read_subvol = afr_data_subvol_get(local->parent, this, NULL,
+ local->readable, NULL, NULL);
+
+ if (local->parent2)
+ parent2_read_subvol = afr_data_subvol_get(local->parent2, this, NULL,
+ local->readable2, NULL, NULL);
+
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
+ afr_pick_error_xdata(local, priv, local->parent, local->readable,
+ local->parent2, local->readable2);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret < 0) {
+ if (local->inode)
+ afr_inode_need_refresh_set(local->inode, this);
+ if (local->parent)
+ afr_inode_need_refresh_set(local->parent, this);
+ if (local->parent2)
+ afr_inode_need_refresh_set(local->parent2, this);
+ continue;
}
- parent->path = gf_strdup (dirname (child_path));
- if (!parent->path) {
- if (op_errno)
- *op_errno = ENOMEM;
- goto out;
+ if (local->op_ret == -1) {
+ local->op_ret = local->replies[i].op_ret;
+ local->op_errno = local->replies[i].op_errno;
+
+ local->cont.dir_fop.buf = local->replies[i].poststat;
+ local->cont.dir_fop.preparent = local->replies[i].preparent;
+ local->cont.dir_fop.postparent = local->replies[i].postparent;
+ local->cont.dir_fop.prenewparent = local->replies[i].preparent2;
+ local->cont.dir_fop.postnewparent = local->replies[i].postparent2;
+ if (local->xdata_rsp) {
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = NULL;
+ }
+
+ if (local->replies[i].xdata)
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ continue;
}
- parent->inode = inode_ref (child->parent);
- gf_uuid_copy (parent->gfid, child->pargfid);
-
- ret = 0;
-out:
- GF_FREE (child_path);
-
- return ret;
-}
-
+ if (i == inode_read_subvol) {
+ local->cont.dir_fop.buf = local->replies[i].poststat;
+ if (local->replies[i].xdata) {
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ }
+ }
-static void
-__afr_dir_write_finalize (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int inode_read_subvol = -1;
- int parent_read_subvol = -1;
- int parent2_read_subvol = -1;
- int i = 0;
- afr_read_subvol_args_t args = {0,};
-
- local = frame->local;
- priv = this->private;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret == -1)
- continue;
- gf_uuid_copy (args.gfid, local->replies[i].poststat.ia_gfid);
- args.ia_type = local->replies[i].poststat.ia_type;
- break;
+ if (i == parent_read_subvol) {
+ local->cont.dir_fop.preparent = local->replies[i].preparent;
+ local->cont.dir_fop.postparent = local->replies[i].postparent;
}
- if (local->inode) {
- afr_replies_interpret (frame, this, local->inode, NULL);
- inode_read_subvol = afr_data_subvol_get (local->inode, this,
- NULL, NULL, &args);
- }
-
- if (local->parent)
- parent_read_subvol = afr_data_subvol_get (local->parent, this,
- NULL, NULL, NULL);
- if (local->parent2)
- parent2_read_subvol = afr_data_subvol_get (local->parent2, this,
- NULL, NULL, NULL);
-
- local->op_ret = -1;
- local->op_errno = afr_final_errno (local, priv);
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret < 0) {
- if (local->inode)
- afr_inode_read_subvol_reset (local->inode,
- this);
- if (local->parent)
- afr_inode_read_subvol_reset (local->parent,
- this);
- if (local->parent2)
- afr_inode_read_subvol_reset (local->parent2,
- this);
- continue;
- }
-
- if (local->op_ret == -1) {
- local->op_ret = local->replies[i].op_ret;
- local->op_errno = local->replies[i].op_errno;
-
- local->cont.dir_fop.buf =
- local->replies[i].poststat;
- local->cont.dir_fop.preparent =
- local->replies[i].preparent;
- local->cont.dir_fop.postparent =
- local->replies[i].postparent;
- local->cont.dir_fop.prenewparent =
- local->replies[i].preparent2;
- local->cont.dir_fop.postnewparent =
- local->replies[i].postparent2;
- if (local->replies[i].xdata)
- local->xdata_rsp =
- dict_ref (local->replies[i].xdata);
- continue;
- }
-
- if (i == inode_read_subvol) {
- local->cont.dir_fop.buf =
- local->replies[i].poststat;
- if (local->replies[i].xdata) {
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
- local->xdata_rsp =
- dict_ref (local->replies[i].xdata);
- }
- }
-
- if (i == parent_read_subvol) {
- local->cont.dir_fop.preparent =
- local->replies[i].preparent;
- local->cont.dir_fop.postparent =
- local->replies[i].postparent;
- }
-
- if (i == parent2_read_subvol) {
- local->cont.dir_fop.prenewparent =
- local->replies[i].preparent2;
- local->cont.dir_fop.postnewparent =
- local->replies[i].postparent2;
- }
- }
-
- afr_txn_arbitrate_fop_cbk (frame, this);
+ if (i == parent2_read_subvol) {
+ local->cont.dir_fop.prenewparent = local->replies[i].preparent2;
+ local->cont.dir_fop.postnewparent = local->replies[i].postparent2;
+ }
+ }
}
-
static void
-__afr_dir_write_fill (call_frame_t *frame, xlator_t *this, int child_index,
- int op_ret, int op_errno, struct iatt *poststat,
- struct iatt *preparent, struct iatt *postparent,
- struct iatt *preparent2, struct iatt *postparent2,
- dict_t *xdata)
+__afr_dir_write_fill(call_frame_t *frame, xlator_t *this, int child_index,
+ int op_ret, int op_errno, struct iatt *poststat,
+ struct iatt *preparent, struct iatt *postparent,
+ struct iatt *preparent2, struct iatt *postparent2,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
-
- local = frame->local;
- fd_ctx = local->fd_ctx;
-
- local->replies[child_index].valid = 1;
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
-
- if (op_ret >= 0) {
- if (poststat)
- local->replies[child_index].poststat = *poststat;
- if (preparent)
- local->replies[child_index].preparent = *preparent;
- if (postparent)
- local->replies[child_index].postparent = *postparent;
- if (preparent2)
- local->replies[child_index].preparent2 = *preparent2;
- if (postparent2)
- local->replies[child_index].postparent2 = *postparent2;
- if (xdata)
- local->replies[child_index].xdata = dict_ref (xdata);
-
- if (fd_ctx)
- fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
- } else {
- if (op_errno != ENOTEMPTY)
- afr_transaction_fop_failed (frame, this, child_index);
- if (fd_ctx)
- fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
- }
-
- return;
+ afr_local_t *local = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ local = frame->local;
+ fd_ctx = local->fd_ctx;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (xdata)
+ local->replies[child_index].xdata = dict_ref(xdata);
+
+ if (op_ret >= 0) {
+ if (poststat)
+ local->replies[child_index].poststat = *poststat;
+ if (preparent)
+ local->replies[child_index].preparent = *preparent;
+ if (postparent)
+ local->replies[child_index].postparent = *postparent;
+ if (preparent2)
+ local->replies[child_index].preparent2 = *preparent2;
+ if (postparent2)
+ local->replies[child_index].postparent2 = *postparent2;
+ if (fd_ctx)
+ fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
+ } else {
+ if (op_errno != ENOTEMPTY)
+ afr_transaction_fop_failed(frame, this, child_index);
+ if (fd_ctx)
+ fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
+ }
+
+ return;
}
-
static int
-__afr_dir_write_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, struct iatt *buf,
- struct iatt *preparent, struct iatt *postparent,
- struct iatt *preparent2, struct iatt *postparent2,
- dict_t *xdata)
+__afr_dir_write_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent,
+ struct iatt *preparent2, struct iatt *postparent2,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int child_index = (long) cookie;
- int call_count = -1;
-
- local = frame->local;
-
- LOCK (&frame->lock);
- {
- __afr_dir_write_fill (frame, this, child_index, op_ret,
- op_errno, buf, preparent, postparent,
- preparent2, postparent2, xdata);
- }
- UNLOCK (&frame->lock);
- call_count = afr_frame_return (frame);
-
- if (call_count == 0) {
- __afr_dir_write_finalize (frame, this);
-
- if (afr_txn_nothing_failed (frame, this))
- local->transaction.unwind (frame, this);
+ afr_local_t *local = NULL;
+ int child_index = (long)cookie;
+ int call_count = -1;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ LOCK(&frame->lock);
+ {
+ __afr_dir_write_fill(frame, this, child_index, op_ret, op_errno, buf,
+ preparent, postparent, preparent2, postparent2,
+ xdata);
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
+
+ if (call_count == 0) {
+ __afr_dir_write_finalize(frame, this);
+
+ if (afr_txn_nothing_failed(frame, this)) {
+ /* If it did pre-op, it will do post-op, changing ctime. */
+ if (priv->consistent_metadata && afr_needs_changelog_update(local))
+ afr_zero_fill_stat(local);
+ local->transaction.unwind(frame, this);
+ }
- afr_mark_entry_pending_changelog (frame, this);
+ afr_mark_entry_pending_changelog(frame, this);
- local->transaction.resume (frame, this);
- }
+ afr_transaction_resume(frame, this);
+ }
- return 0;
+ return 0;
}
-
int
-afr_mark_new_entry_changelog_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int op_ret, int op_errno,
- dict_t *xattr, dict_t *xdata)
+afr_mark_new_entry_changelog_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int op_ret, int op_errno,
+ dict_t *xattr, dict_t *xdata)
{
- int call_count = 0;
+ int call_count = 0;
- call_count = afr_frame_return (frame);
+ call_count = afr_frame_return(frame);
- if (call_count == 0)
- AFR_STACK_DESTROY (frame);
+ if (call_count == 0)
+ AFR_STACK_DESTROY(frame);
- return 0;
+ return 0;
}
-
void
-afr_mark_new_entry_changelog (call_frame_t *frame, xlator_t *this)
+afr_mark_new_entry_changelog(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *new_frame = NULL;
- afr_local_t *local = NULL;
- afr_local_t *new_local = NULL;
- afr_private_t *priv = NULL;
- dict_t *xattr = NULL;
- int32_t **changelog = NULL;
- int i = 0;
- int op_errno = ENOMEM;
- unsigned char *pending = NULL;
- int call_count = 0;
-
- local = frame->local;
- priv = this->private;
-
- new_frame = copy_frame (frame);
- if (!new_frame)
- goto out;
-
- new_local = AFR_FRAME_INIT (new_frame, op_errno);
- if (!new_local)
- goto out;
-
- xattr = dict_new ();
- if (!xattr)
- goto out;
-
- pending = alloca0 (priv->child_count);
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.pre_op[i] &&
- !local->transaction.failed_subvols[i]) {
- call_count ++;
- continue;
- }
- pending[i] = 1;
- }
-
- changelog = afr_mark_pending_changelog (priv, pending, xattr,
- local->cont.dir_fop.buf.ia_type);
- if (!changelog)
- goto out;
-
- new_local->pending = changelog;
- gf_uuid_copy (new_local->loc.gfid, local->cont.dir_fop.buf.ia_gfid);
- new_local->loc.inode = inode_ref (local->inode);
-
- new_local->call_count = call_count;
-
- for (i = 0; i < priv->child_count; i++) {
- if (pending[i])
- continue;
-
- STACK_WIND_COOKIE (new_frame, afr_mark_new_entry_changelog_cbk,
- (void *) (long) i, priv->children[i],
- priv->children[i]->fops->xattrop,
- &new_local->loc, GF_XATTROP_ADD_ARRAY,
- xattr, NULL);
- if (!--call_count)
- break;
+ call_frame_t *new_frame = NULL;
+ afr_local_t *local = NULL;
+ afr_local_t *new_local = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xattr = NULL;
+ int32_t **changelog = NULL;
+ int i = 0;
+ int op_errno = ENOMEM;
+ unsigned char *pending = NULL;
+ int call_count = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ new_frame = copy_frame(frame);
+ if (!new_frame)
+ goto out;
+
+ new_local = AFR_FRAME_INIT(new_frame, op_errno);
+ if (!new_local)
+ goto out;
+
+ xattr = dict_new();
+ if (!xattr)
+ goto out;
+
+ pending = alloca0(priv->child_count);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.pre_op[i] &&
+ !local->transaction.failed_subvols[i]) {
+ call_count++;
+ continue;
}
+ pending[i] = 1;
+ }
+
+ changelog = afr_mark_pending_changelog(priv, pending, xattr,
+ local->cont.dir_fop.buf.ia_type);
+ if (!changelog)
+ goto out;
+
+ new_local->pending = changelog;
+ gf_uuid_copy(new_local->loc.gfid, local->cont.dir_fop.buf.ia_gfid);
+ new_local->loc.inode = inode_ref(local->inode);
+
+ new_local->call_count = call_count;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (pending[i])
+ continue;
- new_frame = NULL;
+ STACK_WIND_COOKIE(new_frame, afr_mark_new_entry_changelog_cbk,
+ (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->xattrop, &new_local->loc,
+ GF_XATTROP_ADD_ARRAY, xattr, NULL);
+ if (!--call_count)
+ break;
+ }
+
+ new_frame = NULL;
out:
- if (new_frame)
- AFR_STACK_DESTROY (new_frame);
- if (xattr)
- dict_unref (xattr);
- return;
+ if (new_frame)
+ AFR_STACK_DESTROY(new_frame);
+ if (xattr)
+ dict_unref(xattr);
+ return;
}
-
void
-afr_mark_entry_pending_changelog (call_frame_t *frame, xlator_t *this)
+afr_mark_entry_pending_changelog(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int pre_op_count = 0;
- int failed_count = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int pre_op_count = 0;
+ int failed_count = 0;
+ unsigned char *success_replies = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- if (local->op_ret < 0)
- return;
+ if (local->op_ret < 0)
+ return;
- if (local->op != GF_FOP_CREATE && local->op != GF_FOP_MKNOD)
- return;
+ if (local->op != GF_FOP_CREATE && local->op != GF_FOP_MKNOD &&
+ local->op != GF_FOP_MKDIR)
+ return;
- pre_op_count = AFR_COUNT (local->transaction.pre_op, priv->child_count);
- failed_count = AFR_COUNT (local->transaction.failed_subvols,
- priv->child_count);
+ pre_op_count = AFR_COUNT(local->transaction.pre_op, priv->child_count);
+ failed_count = AFR_COUNT(local->transaction.failed_subvols,
+ priv->child_count);
- if (pre_op_count == priv->child_count && !failed_count)
- return;
+ /* FOP succeeded on all bricks. */
+ if (pre_op_count == priv->child_count && !failed_count)
+ return;
- afr_mark_new_entry_changelog (frame, this);
+ /* FOP did not succeed on a quorum of bricks. */
+ success_replies = alloca0(priv->child_count);
+ afr_fill_success_replies(local, priv, success_replies);
+ if (!afr_has_quorum(success_replies, this, NULL))
+ return;
+ if (priv->thin_arbiter_count) {
+ /* Mark the new entry using the thin-arbiter (ta) file. */
+ local->is_new_entry = _gf_true;
return;
-}
+ }
+ afr_mark_new_entry_changelog(frame, this);
+
+ return;
+}
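
The function above decides whether a freshly created entry needs to be marked for heal: nothing is done if the fop succeeded everywhere, nothing is done without a quorum of successes, thin-arbiter volumes defer the marking to the ta file, and everything else gets a pending changelog. A purely illustrative restatement of that decision flow (no AFR types, names invented for the sketch):

```
#include <stdio.h>

/* Decision flow for marking a freshly created entry as needing heal,
 * restated from afr_mark_entry_pending_changelog above. */
static const char *
new_entry_heal_action(int pre_op_count, int failed_count, int child_count,
                      int has_quorum, int thin_arbiter)
{
    if (pre_op_count == child_count && !failed_count)
        return "nothing to do: fop succeeded everywhere";
    if (!has_quorum)
        return "no quorum of successes: do not mark";
    if (thin_arbiter)
        return "mark the new entry via the thin-arbiter file";
    return "mark pending changelog on the good bricks";
}

int
main(void)
{
    /* 3 bricks, pre-op on 2, 1 failure, quorum met, no thin arbiter */
    printf("%s\n", new_entry_heal_action(2, 1, 3, 1, 0));
    return 0;
}
```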
/* {{{ create */
int
-afr_create_unwind (call_frame_t *frame, xlator_t *this)
+afr_create_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
+ main_frame = afr_transaction_detach_fop_frame(frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (create, main_frame, local->op_ret, local->op_errno,
- local->cont.create.fd, local->inode,
- &local->cont.dir_fop.buf,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent, local->xdata_rsp);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(create, main_frame, local->op_ret, local->op_errno,
+ local->cont.create.fd, local->inode,
+ &local->cont.dir_fop.buf, &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_create_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno,
- fd_t *fd, inode_t *inode, struct iatt *buf,
- struct iatt *preparent, struct iatt *postparent,
- dict_t *xdata)
+afr_create_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, buf,
- preparent, postparent, NULL, NULL, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ preparent, postparent, NULL, NULL, xdata);
}
-
int
-afr_create_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_create_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
-
- STACK_WIND_COOKIE (frame, afr_create_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->create,
- &local->loc, local->cont.create.flags,
- local->cont.create.mode, local->umask,
- local->cont.create.fd, local->xdata_req);
- return 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ STACK_WIND_COOKIE(frame, afr_create_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->create, &local->loc,
+ local->cont.create.flags, local->cont.create.mode,
+ local->umask, local->cont.create.fd, local->xdata_req);
+ return 0;
}
-
int
-afr_create (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
- mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+afr_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, loc);
-
- local->fd_ctx = afr_fd_ctx_get (fd, this);
- if (!local->fd_ctx)
- goto out;
-
- local->inode = inode_ref (loc->inode);
- local->parent = inode_ref (loc->parent);
-
- local->op = GF_FOP_CREATE;
- local->cont.create.flags = flags;
- local->fd_ctx->flags = flags;
- local->cont.create.mode = mode;
- local->cont.create.fd = fd_ref (fd);
- local->umask = umask;
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->transaction.wind = afr_create_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_create_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, loc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (loc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[0], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- int_lock->lockee_count++;
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, loc);
+
+ local->fd_ctx = afr_fd_ctx_get(fd, this);
+ if (!local->fd_ctx)
+ goto out;
+
+ local->inode = inode_ref(loc->inode);
+ local->parent = inode_ref(loc->parent);
+
+ local->op = GF_FOP_CREATE;
+ local->cont.create.flags = flags;
+ local->fd_ctx->flags = flags;
+ local->cont.create.mode = mode;
+ local->cont.create.fd = fd_ref(fd);
+ local->umask = umask;
+
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->transaction.wind = afr_create_wind;
+ local->transaction.unwind = afr_create_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, loc, &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(loc->path);
+ ret = afr_transaction(transaction_frame, this, AFR_ENTRY_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (create, frame, -1, op_errno, NULL, NULL, NULL, NULL,
- NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(create, frame, -1, op_errno, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
}
/* }}} */
@@ -525,518 +502,436 @@ out:
/* {{{ mknod */
int
-afr_mknod_unwind (call_frame_t *frame, xlator_t *this)
+afr_mknod_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (mknod, main_frame, local->op_ret, local->op_errno,
- local->inode, &local->cont.dir_fop.buf,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(mknod, main_frame, local->op_ret, local->op_errno,
+ local->inode, &local->cont.dir_fop.buf,
+ &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_mknod_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent, dict_t *xdata)
+afr_mknod_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, buf,
- preparent, postparent, NULL, NULL, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ preparent, postparent, NULL, NULL, xdata);
}
-
int
-afr_mknod_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_mknod_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
-
- STACK_WIND_COOKIE (frame, afr_mknod_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->mknod,
- &local->loc, local->cont.mknod.mode,
- local->cont.mknod.dev, local->umask,
- local->xdata_req);
- return 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ STACK_WIND_COOKIE(frame, afr_mknod_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->mknod, &local->loc,
+ local->cont.mknod.mode, local->cont.mknod.dev,
+ local->umask, local->xdata_req);
+ return 0;
}
int
-afr_mknod (call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
- dev_t dev, mode_t umask, dict_t *xdata)
+afr_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t dev, mode_t umask, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
- local->parent = inode_ref (loc->parent);
-
- local->op = GF_FOP_MKNOD;
- local->cont.mknod.mode = mode;
- local->cont.mknod.dev = dev;
- local->umask = umask;
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->transaction.wind = afr_mknod_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_mknod_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, loc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (loc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[0], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- int_lock->lockee_count++;
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, loc);
+ local->inode = inode_ref(loc->inode);
+ local->parent = inode_ref(loc->parent);
+
+ local->op = GF_FOP_MKNOD;
+ local->cont.mknod.mode = mode;
+ local->cont.mknod.dev = dev;
+ local->umask = umask;
+
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->transaction.wind = afr_mknod_wind;
+ local->transaction.unwind = afr_mknod_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, loc, &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(loc->path);
+ ret = afr_transaction(transaction_frame, this, AFR_ENTRY_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (mknod, frame, -1, op_errno, NULL, NULL, NULL, NULL,
- NULL);
- return 0;
+ AFR_STACK_UNWIND(mknod, frame, -1, op_errno, NULL, NULL, NULL, NULL, NULL);
+ return 0;
}
/* }}} */
/* {{{ mkdir */
-
int
-afr_mkdir_unwind (call_frame_t *frame, xlator_t *this)
+afr_mkdir_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
-
- local = frame->local;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (mkdir, main_frame, local->op_ret, local->op_errno,
- local->inode, &local->cont.dir_fop.buf,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(mkdir, main_frame, local->op_ret, local->op_errno,
+ local->inode, &local->cont.dir_fop.buf,
+ &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_mkdir_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent, dict_t *xdata)
+afr_mkdir_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, buf,
- preparent, postparent, NULL, NULL, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ preparent, postparent, NULL, NULL, xdata);
}
-
int
-afr_mkdir_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_mkdir_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_mkdir_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->mkdir, &local->loc,
- local->cont.mkdir.mode, local->umask,
- local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_mkdir_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->mkdir, &local->loc,
+ local->cont.mkdir.mode, local->umask, local->xdata_req);
+ return 0;
}
-
int
-afr_mkdir (call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
- mode_t umask, dict_t *xdata)
+afr_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
- local->parent = inode_ref (loc->parent);
-
- local->cont.mkdir.mode = mode;
- local->umask = umask;
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->op = GF_FOP_MKDIR;
- local->transaction.wind = afr_mkdir_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_mkdir_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, loc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (loc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[0], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- int_lock->lockee_count++;
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, loc);
+ local->inode = inode_ref(loc->inode);
+ local->parent = inode_ref(loc->parent);
+
+ local->cont.mkdir.mode = mode;
+ local->umask = umask;
+
+ if (!xdata || !dict_get_sizen(xdata, "gfid-req")) {
+ op_errno = EPERM;
+ gf_msg_callingfn(this->name, GF_LOG_WARNING, op_errno,
+ AFR_MSG_GFID_NULL,
+ "mkdir: %s is received "
+ "without gfid-req %p",
+ loc->path, xdata);
+ goto out;
+ }
+
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ if (!local->xdata_req) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ local->op = GF_FOP_MKDIR;
+ local->transaction.wind = afr_mkdir_wind;
+ local->transaction.unwind = afr_mkdir_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, loc, &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(loc->path);
+ ret = afr_transaction(transaction_frame, this, AFR_ENTRY_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (mkdir, frame, -1, op_errno, NULL, NULL, NULL, NULL,
- NULL);
- return 0;
+ AFR_STACK_UNWIND(mkdir, frame, -1, op_errno, NULL, NULL, NULL, NULL, NULL);
+ return 0;
}
/* }}} */
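
One behavioural change in the mkdir hunk above, beyond the reformatting, is that afr_mkdir() now refuses requests whose xdata lacks a "gfid-req" entry, failing with EPERM and logging a warning before any transaction is set up. The sketch below is an illustrative stand-in for that guard only; it models the request dictionary as a plain key list rather than the real dict_t API, so every name in it is hypothetical.

```
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for dict_get_sizen(): does the request carry the wanted key? */
static int has_key(const char **keys, int n, const char *want)
{
    for (int i = 0; i < n; i++)
        if (strcmp(keys[i], want) == 0)
            return 1;
    return 0;
}

/* Mirrors the new check in afr_mkdir(): no xdata or no "gfid-req" => EPERM. */
static int mkdir_guard(const char **xdata_keys, int nkeys, const char *path)
{
    if (!xdata_keys || !has_key(xdata_keys, nkeys, "gfid-req")) {
        fprintf(stderr, "mkdir: %s received without gfid-req\n", path);
        return -EPERM;              /* caller turns this into op_errno */
    }
    return 0;
}

int main(void)
{
    const char *keys[] = {"gfid-req"};
    printf("%d %d\n", mkdir_guard(keys, 1, "/dir"), mkdir_guard(NULL, 0, "/dir"));
    return 0;
}
```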
/* {{{ link */
-
int
-afr_link_unwind (call_frame_t *frame, xlator_t *this)
+afr_link_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
-
- local = frame->local;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (link, main_frame, local->op_ret, local->op_errno,
- local->inode, &local->cont.dir_fop.buf,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(link, main_frame, local->op_ret, local->op_errno,
+ local->inode, &local->cont.dir_fop.buf,
+ &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_link_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent, dict_t *xdata)
+afr_link_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, buf,
- preparent, postparent, NULL, NULL, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ preparent, postparent, NULL, NULL, xdata);
}
-
int
-afr_link_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_link_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_link_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->link,
- &local->loc, &local->newloc, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_link_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->link, &local->loc,
+ &local->newloc, local->xdata_req);
+ return 0;
}
-
int
-afr_link (call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
- dict_t *xdata)
+afr_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, oldloc);
- loc_copy (&local->newloc, newloc);
-
- local->inode = inode_ref (oldloc->inode);
- local->parent = inode_ref (newloc->parent);
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->op = GF_FOP_LINK;
-
- local->transaction.wind = afr_link_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_link_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, newloc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (newloc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[0], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- int_lock->lockee_count++;
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, oldloc);
+ loc_copy(&local->newloc, newloc);
- return 0;
+ local->inode = inode_ref(oldloc->inode);
+ local->parent = inode_ref(newloc->parent);
+
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->op = GF_FOP_LINK;
+
+ local->transaction.wind = afr_link_wind;
+ local->transaction.unwind = afr_link_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, newloc,
+ &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(newloc->path);
+ ret = afr_transaction(transaction_frame, this, AFR_ENTRY_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (link, frame, -1, op_errno, NULL, NULL, NULL, NULL,
- NULL);
- return 0;
+ AFR_STACK_UNWIND(link, frame, -1, op_errno, NULL, NULL, NULL, NULL, NULL);
+ return 0;
}
/* }}} */
/* {{{ symlink */
-
int
-afr_symlink_unwind (call_frame_t *frame, xlator_t *this)
+afr_symlink_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
-
- local = frame->local;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (symlink, main_frame, local->op_ret, local->op_errno,
- local->inode, &local->cont.dir_fop.buf,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(symlink, main_frame, local->op_ret, local->op_errno,
+ local->inode, &local->cont.dir_fop.buf,
+ &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_symlink_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent, dict_t *xdata)
+afr_symlink_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, buf,
- preparent, postparent, NULL, NULL, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ preparent, postparent, NULL, NULL, xdata);
}
-
int
-afr_symlink_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_symlink_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
-
- STACK_WIND_COOKIE (frame, afr_symlink_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->symlink,
- local->cont.symlink.linkpath, &local->loc,
- local->umask, local->xdata_req);
- return 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ STACK_WIND_COOKIE(frame, afr_symlink_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->symlink,
+ local->cont.symlink.linkpath, &local->loc, local->umask,
+ local->xdata_req);
+ return 0;
}
-
int
-afr_symlink (call_frame_t *frame, xlator_t *this, const char *linkpath,
- loc_t *loc, mode_t umask, dict_t *xdata)
+afr_symlink(call_frame_t *frame, xlator_t *this, const char *linkpath,
+ loc_t *loc, mode_t umask, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
- local->parent = inode_ref (loc->parent);
-
- local->cont.symlink.linkpath = gf_strdup (linkpath);
- local->umask = umask;
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->op = GF_FOP_SYMLINK;
- local->transaction.wind = afr_symlink_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_symlink_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, loc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (loc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[0], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- int_lock->lockee_count++;
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, loc);
+ local->inode = inode_ref(loc->inode);
+ local->parent = inode_ref(loc->parent);
+
+ local->cont.symlink.linkpath = gf_strdup(linkpath);
+ local->umask = umask;
+
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->op = GF_FOP_SYMLINK;
+ local->transaction.wind = afr_symlink_wind;
+ local->transaction.unwind = afr_symlink_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, loc, &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(loc->path);
+ ret = afr_transaction(transaction_frame, this, AFR_ENTRY_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (symlink, frame, -1, op_errno, NULL, NULL, NULL,
- NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(symlink, frame, -1, op_errno, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
}
/* }}} */
@@ -1044,161 +939,118 @@ out:
/* {{{ rename */
int
-afr_rename_unwind (call_frame_t *frame, xlator_t *this)
+afr_rename_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
-
- local = frame->local;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (rename, main_frame, local->op_ret, local->op_errno,
- &local->cont.dir_fop.buf,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent,
- &local->cont.dir_fop.prenewparent,
- &local->cont.dir_fop.postnewparent, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(rename, main_frame, local->op_ret, local->op_errno,
+ &local->cont.dir_fop.buf, &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent,
+ &local->cont.dir_fop.prenewparent,
+ &local->cont.dir_fop.postnewparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_rename_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *buf,
- struct iatt *preoldparent, struct iatt *postoldparent,
- struct iatt *prenewparent, struct iatt *postnewparent,
- dict_t *xdata)
+afr_rename_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent,
+ dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, buf,
- preoldparent, postoldparent, prenewparent,
- postnewparent, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, buf,
+ preoldparent, postoldparent, prenewparent,
+ postnewparent, xdata);
}
-
int
-afr_rename_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_rename_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_rename_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->rename,
- &local->loc, &local->newloc, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_rename_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->rename, &local->loc,
+ &local->newloc, local->xdata_req);
+ return 0;
}
-
int
-afr_rename (call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
- dict_t *xdata)
+afr_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
- int nlockee = 0;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame) {
- op_errno = ENOMEM;
- goto out;
- }
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, oldloc);
- loc_copy (&local->newloc, newloc);
-
- local->inode = inode_ref (oldloc->inode);
- local->parent = inode_ref (oldloc->parent);
- local->parent2 = inode_ref (newloc->parent);
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->op = GF_FOP_RENAME;
- local->transaction.wind = afr_rename_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_rename_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, oldloc,
- &op_errno);
- if (ret)
- goto out;
- ret = afr_build_parent_loc (&local->transaction.new_parent_loc, newloc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (oldloc->path);
- local->transaction.new_basename = AFR_BASENAME (newloc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = nlockee = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[nlockee], local,
- &local->transaction.new_parent_loc,
- local->transaction.new_basename,
- priv->child_count);
- if (ret)
- goto out;
-
- nlockee++;
- ret = afr_init_entry_lockee (&int_lock->lockee[nlockee], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- nlockee++;
- if (local->newloc.inode && IA_ISDIR (local->newloc.inode->ia_type)) {
- ret = afr_init_entry_lockee (&int_lock->lockee[nlockee], local,
- &local->newloc,
- NULL,
- priv->child_count);
- if (ret)
- goto out;
-
- nlockee++;
- }
- qsort (int_lock->lockee, nlockee, sizeof (*int_lock->lockee),
- afr_entry_lockee_cmp);
- int_lock->lockee_count = nlockee;
-
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_RENAME_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, oldloc);
+ loc_copy(&local->newloc, newloc);
+
+ local->inode = inode_ref(oldloc->inode);
+ local->parent = inode_ref(oldloc->parent);
+ local->parent2 = inode_ref(newloc->parent);
+
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->op = GF_FOP_RENAME;
+ local->transaction.wind = afr_rename_wind;
+ local->transaction.unwind = afr_rename_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, oldloc,
+ &op_errno);
+ if (ret)
+ goto out;
+ ret = afr_build_parent_loc(&local->transaction.new_parent_loc, newloc,
+ &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(oldloc->path);
+ local->transaction.new_basename = AFR_BASENAME(newloc->path);
+ ret = afr_transaction(transaction_frame, this,
+ AFR_ENTRY_RENAME_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (rename, frame, -1, op_errno, NULL, NULL, NULL, NULL,
- NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(rename, frame, -1, op_errno, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
}
/* }}} */
@@ -1206,263 +1058,205 @@ out:
/* {{{ unlink */
int
-afr_unlink_unwind (call_frame_t *frame, xlator_t *this)
+afr_unlink_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
-
- local = frame->local;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (unlink, main_frame, local->op_ret, local->op_errno,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(unlink, main_frame, local->op_ret, local->op_errno,
+ &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_unlink_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *preparent,
- struct iatt *postparent, dict_t *xdata)
+afr_unlink_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, NULL,
- preparent, postparent, NULL, NULL, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ preparent, postparent, NULL, NULL, xdata);
}
-
int
-afr_unlink_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_unlink_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_unlink_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->unlink,
- &local->loc, local->xflag, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_unlink_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->unlink, &local->loc,
+ local->xflag, local->xdata_req);
+ return 0;
}
-
int
-afr_unlink (call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
- dict_t *xdata)
+afr_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, loc);
- local->xflag = xflag;
-
- local->inode = inode_ref (loc->inode);
- local->parent = inode_ref (loc->parent);
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->op = GF_FOP_UNLINK;
- local->transaction.wind = afr_unlink_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_unlink_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, loc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (loc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[0], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- int_lock->lockee_count++;
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, loc);
+ local->xflag = xflag;
+
+ local->inode = inode_ref(loc->inode);
+ local->parent = inode_ref(loc->parent);
+
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->op = GF_FOP_UNLINK;
+ local->transaction.wind = afr_unlink_wind;
+ local->transaction.unwind = afr_unlink_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, loc, &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(loc->path);
+ ret = afr_transaction(transaction_frame, this, AFR_ENTRY_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (unlink, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(unlink, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
/* }}} */
/* {{{ rmdir */
-
-
int
-afr_rmdir_unwind (call_frame_t *frame, xlator_t *this)
+afr_rmdir_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *main_frame = NULL;
- afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (rmdir, main_frame, local->op_ret, local->op_errno,
- &local->cont.dir_fop.preparent,
- &local->cont.dir_fop.postparent, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(rmdir, main_frame, local->op_ret, local->op_errno,
+ &local->cont.dir_fop.preparent,
+ &local->cont.dir_fop.postparent, local->xdata_rsp);
+ return 0;
+}
int
-afr_rmdir_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *preparent,
- struct iatt *postparent, dict_t *xdata)
+afr_rmdir_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- return __afr_dir_write_cbk (frame, cookie, this, op_ret, op_errno, NULL,
- preparent, postparent, NULL, NULL, xdata);
+ return __afr_dir_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ preparent, postparent, NULL, NULL, xdata);
}
-
int
-afr_rmdir_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_rmdir_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_rmdir_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->rmdir,
- &local->loc, local->cont.rmdir.flags, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_rmdir_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->rmdir, &local->loc,
+ local->cont.rmdir.flags, local->xdata_req);
+ return 0;
}
-
int
-afr_rmdir (call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
- dict_t *xdata)
+afr_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
- int nlockee = 0;
-
- priv = this->private;
-
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
-
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
-
-
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
- local->parent = inode_ref (loc->parent);
-
- local->cont.rmdir.flags = flags;
-
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
-
- if (!local->xdata_req)
- goto out;
-
- local->op = GF_FOP_RMDIR;
- local->transaction.wind = afr_rmdir_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_rmdir_unwind;
-
- ret = afr_build_parent_loc (&local->transaction.parent_loc, loc,
- &op_errno);
- if (ret)
- goto out;
-
- local->transaction.main_frame = frame;
- local->transaction.basename = AFR_BASENAME (loc->path);
- int_lock = &local->internal_lock;
-
- int_lock->lockee_count = nlockee = 0;
- ret = afr_init_entry_lockee (&int_lock->lockee[nlockee], local,
- &local->transaction.parent_loc,
- local->transaction.basename,
- priv->child_count);
- if (ret)
- goto out;
-
- nlockee++;
- ret = afr_init_entry_lockee (&int_lock->lockee[nlockee], local,
- &local->loc,
- NULL,
- priv->child_count);
- if (ret)
- goto out;
-
- nlockee++;
- qsort (int_lock->lockee, nlockee, sizeof (*int_lock->lockee),
- afr_entry_lockee_cmp);
- int_lock->lockee_count = nlockee;
-
- ret = afr_transaction (transaction_frame, this, AFR_ENTRY_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
-
- return 0;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
+
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, loc);
+ local->inode = inode_ref(loc->inode);
+ local->parent = inode_ref(loc->parent);
+
+ local->cont.rmdir.flags = flags;
+
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->op = GF_FOP_RMDIR;
+ local->transaction.wind = afr_rmdir_wind;
+ local->transaction.unwind = afr_rmdir_unwind;
+
+ ret = afr_build_parent_loc(&local->transaction.parent_loc, loc, &op_errno);
+ if (ret)
+ goto out;
+
+ local->transaction.main_frame = frame;
+ local->transaction.basename = AFR_BASENAME(loc->path);
+ ret = afr_transaction(transaction_frame, this, AFR_ENTRY_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (rmdir, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(rmdir, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
/* }}} */
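
Before the afr-dir-write.h hunk that follows, it is worth noting the one shape all of the .c changes above share: besides the style reflow, each entry fop drops its per-fop afr_init_entry_lockee()/lockee_count wiring and the transaction.fop/transaction.done assignments, which suggests that entry-lock setup and completion wiring now happen inside afr_transaction() itself. What remains in every fop is the same scaffolding: copy the frame, init the local, fill in fop-specific state, hand off to the transaction, and funnel every failure to one cleanup label. The model below uses stand-in types and helpers, not the real GlusterFS API.

```
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } frame_t;    /* stand-in for call_frame_t */
typedef struct { int op; } local_t;       /* stand-in for afr_local_t  */

static frame_t *copy_frame_stub(void)       { return calloc(1, sizeof(frame_t)); }
static local_t *frame_init_stub(frame_t *f) { (void)f; return calloc(1, sizeof(local_t)); }
static int      transaction_stub(frame_t *f){ (void)f; return 0; }  /* 0 or -errno */

static int entry_fop(void)
{
    frame_t *txn_frame = NULL;
    local_t *local = NULL;
    int ret = -1;
    int op_errno = ENOMEM;               /* default if any allocation fails */

    txn_frame = copy_frame_stub();       /* copy_frame() analogue */
    if (!txn_frame)
        goto out;

    local = frame_init_stub(txn_frame);  /* AFR_FRAME_INIT analogue */
    if (!local)
        goto out;

    local->op = 1;                       /* fop-specific state goes here */

    ret = transaction_stub(txn_frame);   /* afr_transaction() analogue */
    if (ret < 0) {
        op_errno = -ret;                 /* negative return carries the errno */
        goto out;
    }

    /* success: in the real code the transaction machinery owns txn_frame now */
    free(local);
    free(txn_frame);
    return 0;

out:
    /* single cleanup path: destroy the half-built frame, report op_errno */
    free(local);
    free(txn_frame);
    fprintf(stderr, "fop failed, op_errno=%d\n", op_errno);
    return 0;                            /* fops report errors via unwind, not rc */
}

int main(void) { entry_fop(); return 0; }
```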
diff --git a/xlators/cluster/afr/src/afr-dir-write.h b/xlators/cluster/afr/src/afr-dir-write.h
index 02f0a3682d9..1d88c3b9b26 100644
--- a/xlators/cluster/afr/src/afr-dir-write.h
+++ b/xlators/cluster/afr/src/afr-dir-write.h
@@ -12,36 +12,35 @@
#define __DIR_WRITE_H__
int32_t
-afr_create (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int32_t flags, mode_t mode,
- mode_t umask, fd_t *fd, dict_t *xdata);
+afr_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata);
int32_t
-afr_mknod (call_frame_t *frame, xlator_t *this,
- loc_t *loc, mode_t mode, dev_t dev, mode_t umask, dict_t *xdata);
+afr_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t dev, mode_t umask, dict_t *xdata);
int32_t
-afr_mkdir (call_frame_t *frame, xlator_t *this,
- loc_t *loc, mode_t mode, mode_t umask, dict_t *xdata);
+afr_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata);
int32_t
-afr_unlink (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int xflag, dict_t *xdata);
+afr_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata);
int32_t
-afr_rmdir (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int flags, dict_t *xdata);
+afr_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ dict_t *xdata);
int32_t
-afr_link (call_frame_t *frame, xlator_t *this,
- loc_t *oldloc, loc_t *newloc, dict_t *xdata);
+afr_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata);
int32_t
-afr_rename (call_frame_t *frame, xlator_t *this,
- loc_t *oldloc, loc_t *newloc, dict_t *xdata);
+afr_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata);
int
-afr_symlink (call_frame_t *frame, xlator_t *this,
- const char *linkpath, loc_t *oldloc, mode_t umask, dict_t *params);
+afr_symlink(call_frame_t *frame, xlator_t *this, const char *linkpath,
+ loc_t *oldloc, mode_t umask, dict_t *params);
#endif /* __DIR_WRITE_H__ */
diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
index 1690cb684dd..c5521704de2 100644
--- a/xlators/cluster/afr/src/afr-inode-read.c
+++ b/xlators/cluster/afr/src/afr-inode-read.c
@@ -8,7 +8,6 @@
cases as published by the Free Software Foundation.
*/
-
#include <libgen.h>
#include <unistd.h>
#include <fnmatch.h>
@@ -16,21 +15,17 @@
#include <stdlib.h>
#include <signal.h>
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "afr.h"
-#include "dict.h"
-#include "xlator.h"
-#include "hashfn.h"
-#include "logging.h"
-#include "stack.h"
-#include "list.h"
-#include "call-stub.h"
-#include "byte-order.h"
-#include "defaults.h"
-#include "common-utils.h"
-#include "compat-errno.h"
-#include "compat.h"
-#include "quota-common-utils.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/list.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/quota-common-utils.h>
#include "afr-transaction.h"
#include "afr-messages.h"
@@ -45,146 +40,146 @@
* */
int
-afr_handle_quota_size (call_frame_t *frame, xlator_t *this)
+afr_handle_quota_size(call_frame_t *frame, xlator_t *this)
{
- unsigned char *readable = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- int i = 0;
- int ret = 0;
- quota_meta_t size = {0, };
- quota_meta_t max_size = {0, };
- int readable_cnt = 0;
- int read_subvol = -1;
-
- local = frame->local;
- priv = this->private;
- replies = local->replies;
-
- readable = alloca0 (priv->child_count);
-
- afr_inode_read_subvol_get (local->inode, this, readable, 0, 0);
-
- readable_cnt = AFR_COUNT (readable, priv->child_count);
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid || replies[i].op_ret == -1)
- continue;
- if (readable_cnt && !readable[i])
- continue;
- if (!replies[i].xdata)
- continue;
- ret = quota_dict_get_meta (replies[i].xdata, QUOTA_SIZE_KEY,
- &size);
- if (ret == -1)
- continue;
- if (read_subvol == -1)
- read_subvol = i;
- if (size.size > max_size.size ||
- (size.file_count + size.dir_count) >
- (max_size.file_count + max_size.dir_count))
- read_subvol = i;
-
- if (size.size > max_size.size)
- max_size.size = size.size;
- if (size.file_count > max_size.file_count)
- max_size.file_count = size.file_count;
- if (size.dir_count > max_size.dir_count)
- max_size.dir_count = size.dir_count;
- }
-
- if (max_size.size == 0 && max_size.file_count == 0 &&
- max_size.dir_count == 0)
- return read_subvol;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid || replies[i].op_ret == -1)
- continue;
- if (readable_cnt && !readable[i])
- continue;
- if (!replies[i].xdata)
- continue;
- quota_dict_set_meta (replies[i].xdata, QUOTA_SIZE_KEY,
- &max_size, IA_IFDIR);
- }
-
+ unsigned char *readable = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ int i = 0;
+ int ret = 0;
+ quota_meta_t size = {
+ 0,
+ };
+ quota_meta_t max_size = {
+ 0,
+ };
+ int readable_cnt = 0;
+ int read_subvol = -1;
+
+ local = frame->local;
+ priv = this->private;
+ replies = local->replies;
+
+ readable = alloca0(priv->child_count);
+
+ afr_inode_read_subvol_get(local->inode, this, readable, 0, 0);
+
+ readable_cnt = AFR_COUNT(readable, priv->child_count);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ if (readable_cnt && !readable[i])
+ continue;
+ if (!replies[i].xdata)
+ continue;
+ ret = quota_dict_get_meta(replies[i].xdata, QUOTA_SIZE_KEY,
+ SLEN(QUOTA_SIZE_KEY), &size);
+ if (ret == -1)
+ continue;
+ if (read_subvol == -1)
+ read_subvol = i;
+ if (size.size > max_size.size ||
+ (size.file_count + size.dir_count) >
+ (max_size.file_count + max_size.dir_count))
+ read_subvol = i;
+
+ if (size.size > max_size.size)
+ max_size.size = size.size;
+ if (size.file_count > max_size.file_count)
+ max_size.file_count = size.file_count;
+ if (size.dir_count > max_size.dir_count)
+ max_size.dir_count = size.dir_count;
+ }
+
+ if (max_size.size == 0 && max_size.file_count == 0 &&
+ max_size.dir_count == 0)
return read_subvol;
-}
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ if (readable_cnt && !readable[i])
+ continue;
+ if (!replies[i].xdata)
+ continue;
+ quota_dict_set_meta(replies[i].xdata, QUOTA_SIZE_KEY, &max_size,
+ IA_IFDIR);
+ }
+
+ return read_subvol;
+}
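
The body of afr_handle_quota_size() keeps its original logic through the reformat; the only functional tweak visible above is that quota_dict_get_meta() now also takes SLEN(QUOTA_SIZE_KEY). The selection rule it implements, remembering which usable reply reported the largest quota usage and folding per-field maxima so every reply can be patched up to the same value, is sketched below with a local stand-in for quota_meta_t and without the validity/readability filtering of the real code.

```
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t size; int64_t file_count; int64_t dir_count; } quota_meta_t;

/* Pick the reply with the largest usage and accumulate per-field maxima. */
static int pick_read_subvol(const quota_meta_t *replies, int n, quota_meta_t *max_out)
{
    quota_meta_t max = {0, 0, 0};
    int read_subvol = -1;

    for (int i = 0; i < n; i++) {
        const quota_meta_t *s = &replies[i];
        if (read_subvol == -1)
            read_subvol = i;
        if (s->size > max.size ||
            (s->file_count + s->dir_count) > (max.file_count + max.dir_count))
            read_subvol = i;
        if (s->size > max.size)
            max.size = s->size;
        if (s->file_count > max.file_count)
            max.file_count = s->file_count;
        if (s->dir_count > max.dir_count)
            max.dir_count = s->dir_count;
    }
    *max_out = max;   /* the caller then writes max back into every reply's xdata */
    return read_subvol;
}

int main(void)
{
    quota_meta_t replies[2] = {{10, 1, 1}, {40, 2, 3}};
    quota_meta_t max;
    int rs = pick_read_subvol(replies, 2, &max);
    printf("read subvol %d, max size %lld\n", rs, (long long)max.size);
    return 0;
}
```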
/* {{{ access */
int
-afr_access_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, dict_t *xdata)
+afr_access_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0) {
- local->op_ret = op_ret;
- local->op_errno = op_errno;
+ if (op_ret < 0) {
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- AFR_STACK_UNWIND (access, frame, op_ret, op_errno, xdata);
+ AFR_STACK_UNWIND(access, frame, op_ret, op_errno, xdata);
- return 0;
+ return 0;
}
-
int
-afr_access_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_access_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
-
- priv = this->private;
- local = frame->local;
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (access, frame, local->op_ret,
- local->op_errno, 0);
- return 0;
- }
-
- STACK_WIND_COOKIE (frame, afr_access_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->access,
- &local->loc, local->cont.access.mask,
- local->xdata_req);
- return 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(access, frame, local->op_ret, local->op_errno, 0);
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(frame, afr_access_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->access, &local->loc,
+ local->cont.access.mask, local->xdata_req);
+ return 0;
}
int
-afr_access (call_frame_t *frame, xlator_t *this, loc_t *loc,
- int mask, dict_t *xdata)
+afr_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int mask,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int op_errno = 0;
+ afr_local_t *local = NULL;
+ int op_errno = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local->op = GF_FOP_ACCESS;
- loc_copy (&local->loc, loc);
- local->cont.access.mask = mask;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->op = GF_FOP_ACCESS;
+ loc_copy(&local->loc, loc);
+ local->cont.access.mask = mask;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- afr_read_txn (frame, this, loc->inode, afr_access_wind,
- AFR_METADATA_TRANSACTION);
+ afr_read_txn(frame, this, loc->inode, afr_access_wind,
+ AFR_METADATA_TRANSACTION);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (access, frame, -1, op_errno, NULL);
+ AFR_STACK_UNWIND(access, frame, -1, op_errno, NULL);
- return 0;
+ return 0;
}
/* }}} */
@@ -192,152 +187,140 @@ out:
/* {{{ stat */
int
-afr_stat_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- struct iatt *buf, dict_t *xdata)
+afr_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0) {
- local->op_ret = op_ret;
- local->op_errno = op_errno;
+ if (op_ret < 0) {
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- AFR_STACK_UNWIND (stat, frame, op_ret, op_errno, buf, xdata);
+ AFR_STACK_UNWIND(stat, frame, op_ret, op_errno, buf, xdata);
- return 0;
+ return 0;
}
-
int
-afr_stat_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_stat_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
-
- priv = this->private;
- local = frame->local;
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (stat, frame, local->op_ret, local->op_errno,
- 0, 0);
- return 0;
- }
-
- STACK_WIND_COOKIE (frame, afr_stat_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->stat,
- &local->loc, local->xdata_req);
- return 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(stat, frame, local->op_ret, local->op_errno, 0, 0);
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(
+ frame, afr_stat_cbk, (void *)(long)subvol, priv->children[subvol],
+ priv->children[subvol]->fops->stat, &local->loc, local->xdata_req);
+ return 0;
}
int
-afr_stat (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+afr_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int op_errno = 0;
+ afr_local_t *local = NULL;
+ int op_errno = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local->op = GF_FOP_STAT;
- loc_copy (&local->loc, loc);
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->op = GF_FOP_STAT;
+ loc_copy(&local->loc, loc);
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- afr_read_txn (frame, this, loc->inode, afr_stat_wind,
- AFR_DATA_TRANSACTION);
+ afr_read_txn(frame, this, loc->inode, afr_stat_wind, AFR_DATA_TRANSACTION);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (stat, frame, -1, op_errno, NULL, NULL);
+ AFR_STACK_UNWIND(stat, frame, -1, op_errno, NULL, NULL);
- return 0;
+ return 0;
}
-
/* }}} */
/* {{{ fstat */
int
-afr_fstat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *buf,
- dict_t *xdata)
+afr_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0) {
- local->op_ret = op_ret;
- local->op_errno = op_errno;
+ if (op_ret < 0) {
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- AFR_STACK_UNWIND (fstat, frame, op_ret, op_errno, buf, xdata);
+ AFR_STACK_UNWIND(fstat, frame, op_ret, op_errno, buf, xdata);
- return 0;
+ return 0;
}
-
int
-afr_fstat_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_fstat_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
-
- priv = this->private;
- local = frame->local;
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (fstat, frame, local->op_ret, local->op_errno,
- 0, 0);
- return 0;
- }
-
- STACK_WIND_COOKIE (frame, afr_fstat_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->fstat,
- local->fd, local->xdata_req);
- return 0;
-}
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ priv = this->private;
+ local = frame->local;
+
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(fstat, frame, local->op_ret, local->op_errno, 0, 0);
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(
+ frame, afr_fstat_cbk, (void *)(long)subvol, priv->children[subvol],
+ priv->children[subvol]->fops->fstat, local->fd, local->xdata_req);
+ return 0;
+}
int32_t
-afr_fstat (call_frame_t *frame, xlator_t *this,
- fd_t *fd, dict_t *xdata)
+afr_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int op_errno = 0;
+ afr_local_t *local = NULL;
+ int op_errno = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local->op = GF_FOP_FSTAT;
- local->fd = fd_ref (fd);
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->op = GF_FOP_FSTAT;
+ local->fd = fd_ref(fd);
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- afr_read_txn (frame, this, fd->inode, afr_fstat_wind,
- AFR_DATA_TRANSACTION);
+ afr_read_txn(frame, this, fd->inode, afr_fstat_wind, AFR_DATA_TRANSACTION);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (fstat, frame, -1, op_errno, NULL, NULL);
+ AFR_STACK_UNWIND(fstat, frame, -1, op_errno, NULL, NULL);
- return 0;
+ return 0;
}
/* }}} */
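
The read-side fops in afr-inode-read.c (access and stat/fstat above, readlink and getxattr below) all keep the same flow through the reformat: the wind callback unwinds on success, and on failure records the errno and calls afr_read_txn_continue() so the transaction retries on another subvolume; a subvol of -1 in the wind function means every candidate failed and the stored error is unwound. The one behavioural addition visible here is that afr_fstat() now validates the fd context up front via AFR_ERROR_OUT_IF_FDCTX_INVALID() before initializing the frame. A self-contained model of the retry loop, with stand-in names only:

```
#include <errno.h>
#include <stdio.h>

#define SUBVOL_COUNT 3

/* Pretend child translator: returns 0 on success, -1 and sets *err otherwise. */
static int child_stat(int subvol, int *err)
{
    if (subvol < 2) {          /* let the first two children fail */
        *err = ENOTCONN;
        return -1;
    }
    *err = 0;
    return 0;
}

static int read_txn(void)
{
    int op_errno = 0;

    for (int subvol = 0; subvol < SUBVOL_COUNT; subvol++) {
        if (child_stat(subvol, &op_errno) == 0) {
            printf("served from subvol %d\n", subvol);  /* AFR_STACK_UNWIND analogue */
            return 0;
        }
        /* afr_read_txn_continue() analogue: keep the error, try the next child */
    }
    /* the subvol == -1 branch in the wind functions: unwind the stored error */
    printf("all subvols failed, op_errno=%d\n", op_errno);
    return -1;
}

int main(void) { return read_txn() == 0 ? 0 : 1; }
```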
@@ -345,1446 +328,1493 @@ out:
/* {{{ readlink */
int
-afr_readlink_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- const char *buf, struct iatt *sbuf, dict_t *xdata)
+afr_readlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, const char *buf,
+ struct iatt *sbuf, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = op_errno;
+ if (op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- AFR_STACK_UNWIND (readlink, frame, op_ret, op_errno,
- buf, sbuf, xdata);
- return 0;
+ AFR_STACK_UNWIND(readlink, frame, op_ret, op_errno, buf, sbuf, xdata);
+ return 0;
}
int
-afr_readlink_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_readlink_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (readlink, frame, local->op_ret,
- local->op_errno, 0, 0, 0);
- return 0;
- }
-
- STACK_WIND_COOKIE (frame, afr_readlink_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->readlink,
- &local->loc, local->cont.readlink.size,
- local->xdata_req);
- return 0;
-}
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ local = frame->local;
+ priv = this->private;
+
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(readlink, frame, local->op_ret, local->op_errno, 0, 0,
+ 0);
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(frame, afr_readlink_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->readlink, &local->loc,
+ local->cont.readlink.size, local->xdata_req);
+ return 0;
+}
int
-afr_readlink (call_frame_t *frame, xlator_t *this,
- loc_t *loc, size_t size, dict_t *xdata)
+afr_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
+ dict_t *xdata)
{
- afr_local_t * local = NULL;
- int32_t op_errno = 0;
+ afr_local_t *local = NULL;
+ int32_t op_errno = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local->op = GF_FOP_READLINK;
- loc_copy (&local->loc, loc);
- local->cont.readlink.size = size;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->op = GF_FOP_READLINK;
+ loc_copy(&local->loc, loc);
+ local->cont.readlink.size = size;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- afr_read_txn (frame, this, loc->inode, afr_readlink_wind,
- AFR_DATA_TRANSACTION);
+ afr_read_txn(frame, this, loc->inode, afr_readlink_wind,
+ AFR_DATA_TRANSACTION);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND(readlink, frame, -1, op_errno, 0, 0, 0);
+ AFR_STACK_UNWIND(readlink, frame, -1, op_errno, 0, 0, 0);
- return 0;
+ return 0;
}
-
/* }}} */
/* {{{ getxattr */
struct _xattr_key {
- char *key;
- struct list_head list;
+ char *key;
+ struct list_head list;
};
-
int
-__gather_xattr_keys (dict_t *dict, char *key, data_t *value,
- void *data)
+__gather_xattr_keys(dict_t *dict, char *key, data_t *value, void *data)
{
- struct list_head * list = data;
- struct _xattr_key * xkey = NULL;
-
- if (!strncmp (key, AFR_XATTR_PREFIX,
- strlen (AFR_XATTR_PREFIX))) {
+ struct list_head *list = data;
+ struct _xattr_key *xkey = NULL;
- xkey = GF_CALLOC (1, sizeof (*xkey), gf_afr_mt_xattr_key);
- if (!xkey)
- return -1;
+ if (!strncmp(key, AFR_XATTR_PREFIX, SLEN(AFR_XATTR_PREFIX))) {
+ xkey = GF_MALLOC(sizeof(*xkey), gf_afr_mt_xattr_key);
+ if (!xkey)
+ return -1;
- xkey->key = key;
- INIT_LIST_HEAD (&xkey->list);
+ xkey->key = key;
+ INIT_LIST_HEAD(&xkey->list);
- list_add_tail (&xkey->list, list);
- }
- return 0;
+ list_add_tail(&xkey->list, list);
+ }
+ return 0;
}
-
void
-afr_filter_xattrs (dict_t *dict)
+afr_filter_xattrs(dict_t *dict)
{
- struct list_head keys = {0,};
- struct _xattr_key *key = NULL;
- struct _xattr_key *tmp = NULL;
+ struct list_head keys = {
+ 0,
+ };
+ struct _xattr_key *key = NULL;
+ struct _xattr_key *tmp = NULL;
- INIT_LIST_HEAD (&keys);
+ INIT_LIST_HEAD(&keys);
- dict_foreach (dict, __gather_xattr_keys,
- (void *) &keys);
+ dict_foreach(dict, __gather_xattr_keys, (void *)&keys);
- list_for_each_entry_safe (key, tmp, &keys, list) {
- dict_del (dict, key->key);
+ list_for_each_entry_safe(key, tmp, &keys, list)
+ {
+ dict_del(dict, key->key);
- list_del_init (&key->list);
+ list_del_init(&key->list);
- GF_FREE (key);
- }
+ GF_FREE(key);
+ }
}
-static
-gf_boolean_t
-afr_getxattr_ignorable_errnos (int32_t op_errno)
+static gf_boolean_t
+afr_getxattr_ignorable_errnos(int32_t op_errno)
{
- if (op_errno == ENODATA || op_errno == ENOTSUP || op_errno == ERANGE ||
- op_errno == ENAMETOOLONG)
- return _gf_true;
+ if (op_errno == ENODATA || op_errno == ENOTSUP || op_errno == ERANGE ||
+ op_errno == ENAMETOOLONG)
+ return _gf_true;
- return _gf_false;
+ return _gf_false;
}
int
-afr_getxattr_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0 && !afr_getxattr_ignorable_errnos(op_errno)) {
- local->op_ret = op_ret;
- local->op_errno = op_errno;
+ if (op_ret < 0 && !afr_getxattr_ignorable_errnos(op_errno)) {
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- if (dict)
- afr_filter_xattrs (dict);
+ if (dict)
+ afr_filter_xattrs(dict);
- AFR_STACK_UNWIND (getxattr, frame, op_ret, op_errno, dict, xdata);
+ AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, dict, xdata);
- return 0;
+ return 0;
}
-
int
-afr_getxattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_getxattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (getxattr, frame, local->op_ret,
- local->op_errno, NULL, NULL);
- return 0;
- }
-
- STACK_WIND_COOKIE (frame, afr_getxattr_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->getxattr,
- &local->loc, local->cont.getxattr.name,
- local->xdata_req);
- return 0;
-}
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(getxattr, frame, local->op_ret, local->op_errno, NULL,
+ NULL);
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(frame, afr_getxattr_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->getxattr, &local->loc,
+ local->cont.getxattr.name, local->xdata_req);
+ return 0;
+}
int32_t
-afr_getxattr_unwind (call_frame_t *frame, int op_ret, int op_errno,
- dict_t *dict, dict_t *xdata)
+afr_getxattr_unwind(call_frame_t *frame, int op_ret, int op_errno, dict_t *dict,
+ dict_t *xdata)
{
- AFR_STACK_UNWIND (getxattr, frame, op_ret, op_errno, dict, xdata);
- return 0;
+ AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, dict, xdata);
+ return 0;
}
int32_t
-afr_fgetxattr_clrlk_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_fgetxattr_clrlk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- xlator_t **children = NULL;
- dict_t *xattr = NULL;
- char *tmp_report = NULL;
- char lk_summary[1024] = {0,};
- int serz_len = 0;
- int32_t callcnt = 0;
- long int cky = 0;
- int ret = 0;
-
- priv = this->private;
- children = priv->children;
-
- local = frame->local;
- cky = (long) cookie;
-
- LOCK (&frame->lock);
- {
- callcnt = --local->call_count;
- if (op_ret == -1)
- local->replies[cky].op_errno = op_errno;
-
- if (!local->dict)
- local->dict = dict_new ();
- if (local->dict) {
- ret = dict_get_str (dict, local->cont.getxattr.name,
- &tmp_report);
- if (ret)
- goto unlock;
- ret = dict_set_dynstr (local->dict,
- children[cky]->name,
- gf_strdup (tmp_report));
- if (ret)
- goto unlock;
- }
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ xlator_t **children = NULL;
+ dict_t *xattr = NULL;
+ char *tmp_report = NULL;
+ char lk_summary[1024] = {
+ 0,
+ };
+ int serz_len = 0;
+ int32_t callcnt = 0;
+ long int cky = 0;
+ int ret = 0;
+ int keylen = 0;
+ int children_keylen = 0;
+
+ priv = this->private;
+ children = priv->children;
+
+ local = frame->local;
+ cky = (long)cookie;
+ keylen = strlen(local->cont.getxattr.name);
+ children_keylen = strlen(children[cky]->name);
+
+ LOCK(&frame->lock);
+ {
+ callcnt = --local->call_count;
+ if (op_ret == -1)
+ local->replies[cky].op_errno = op_errno;
+
+ if (!local->dict)
+ local->dict = dict_new();
+ if (local->dict) {
+ ret = dict_get_strn(dict, local->cont.getxattr.name, keylen,
+ &tmp_report);
+ if (ret)
+ goto unlock;
+ ret = dict_set_dynstrn(local->dict, children[cky]->name,
+ children_keylen, gf_strdup(tmp_report));
+ if (ret)
+ goto unlock;
}
+ }
unlock:
- UNLOCK (&frame->lock);
-
- if (!callcnt) {
- xattr = dict_new ();
- if (!xattr) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
- }
- ret = dict_serialize_value_with_delim (local->dict,
- lk_summary,
- &serz_len, '\n');
- if (ret) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
- }
- if (serz_len == -1)
- snprintf (lk_summary, sizeof (lk_summary),
- "No locks cleared.");
- ret = dict_set_dynstr (xattr, local->cont.getxattr.name,
- gf_strdup (lk_summary));
- if (ret) {
- op_ret = -1;
- op_errno = ENOMEM;
- gf_msg (this->name, GF_LOG_ERROR,
- ENOMEM, AFR_MSG_DICT_SET_FAILED,
- "Error setting dictionary");
- goto unwind;
- }
+ UNLOCK(&frame->lock);
+
+ if (!callcnt) {
+ xattr = dict_new();
+ if (!xattr) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ ret = dict_serialize_value_with_delim(local->dict, lk_summary,
+ &serz_len, '\n');
+ if (ret) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ if (serz_len == -1)
+ snprintf(lk_summary, sizeof(lk_summary), "No locks cleared.");
+ ret = dict_set_dynstrn(xattr, local->cont.getxattr.name, keylen,
+ gf_strdup(lk_summary));
+ if (ret) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_DICT_SET_FAILED,
+ "Error setting dictionary");
+ goto unwind;
+ }
- unwind:
- // Updating child_errno with more recent 'events'
- op_errno = afr_final_errno (local, priv);
+ op_errno = afr_final_errno(local, priv);
- AFR_STACK_UNWIND (fgetxattr, frame, op_ret, op_errno, xattr,
- xdata);
- if (xattr)
- dict_unref (xattr);
- }
+ unwind:
+ AFR_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, xattr, xdata);
+ if (xattr)
+ dict_unref(xattr);
+ }
- return ret;
+ return ret;
}
int32_t
-afr_getxattr_clrlk_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_getxattr_clrlk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- xlator_t **children = NULL;
- dict_t *xattr = NULL;
- char *tmp_report = NULL;
- char lk_summary[1024] = {0,};
- int serz_len = 0;
- int32_t callcnt = 0;
- long int cky = 0;
- int ret = 0;
-
- priv = this->private;
- children = priv->children;
-
- local = frame->local;
- cky = (long) cookie;
-
- LOCK (&frame->lock);
- {
- callcnt = --local->call_count;
- if (op_ret == -1)
- local->replies[cky].op_errno = op_errno;
-
- if (!local->dict)
- local->dict = dict_new ();
- if (local->dict) {
- ret = dict_get_str (dict, local->cont.getxattr.name,
- &tmp_report);
- if (ret)
- goto unlock;
- ret = dict_set_dynstr (local->dict,
- children[cky]->name,
- gf_strdup (tmp_report));
- if (ret)
- goto unlock;
- }
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ xlator_t **children = NULL;
+ dict_t *xattr = NULL;
+ char *tmp_report = NULL;
+ char lk_summary[1024] = {
+ 0,
+ };
+ int serz_len = 0;
+ int32_t callcnt = 0;
+ long int cky = 0;
+ int ret = 0;
+ int keylen = 0;
+ int children_keylen = 0;
+
+ priv = this->private;
+ children = priv->children;
+
+ local = frame->local;
+ cky = (long)cookie;
+
+ keylen = strlen(local->cont.getxattr.name);
+ children_keylen = strlen(children[cky]->name);
+
+ LOCK(&frame->lock);
+ {
+ callcnt = --local->call_count;
+ if (op_ret == -1)
+ local->replies[cky].op_errno = op_errno;
+
+ if (!local->dict)
+ local->dict = dict_new();
+ if (local->dict) {
+ ret = dict_get_strn(dict, local->cont.getxattr.name, keylen,
+ &tmp_report);
+ if (ret)
+ goto unlock;
+ ret = dict_set_dynstrn(local->dict, children[cky]->name,
+ children_keylen, gf_strdup(tmp_report));
+ if (ret)
+ goto unlock;
}
+ }
unlock:
- UNLOCK (&frame->lock);
-
- if (!callcnt) {
- xattr = dict_new ();
- if (!xattr) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
- }
- ret = dict_serialize_value_with_delim (local->dict,
- lk_summary,
- &serz_len, '\n');
- if (ret) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
- }
- if (serz_len == -1)
- snprintf (lk_summary, sizeof (lk_summary),
- "No locks cleared.");
- ret = dict_set_dynstr (xattr, local->cont.getxattr.name,
- gf_strdup (lk_summary));
- if (ret) {
- op_ret = -1;
- op_errno = ENOMEM;
- gf_msg (this->name, GF_LOG_ERROR,
- ENOMEM, AFR_MSG_DICT_SET_FAILED,
- "Error setting dictionary");
- goto unwind;
- }
+ UNLOCK(&frame->lock);
+
+ if (!callcnt) {
+ xattr = dict_new();
+ if (!xattr) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ ret = dict_serialize_value_with_delim(local->dict, lk_summary,
+ &serz_len, '\n');
+ if (ret) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ if (serz_len == -1)
+ snprintf(lk_summary, sizeof(lk_summary), "No locks cleared.");
+ ret = dict_set_dynstrn(xattr, local->cont.getxattr.name, keylen,
+ gf_strdup(lk_summary));
+ if (ret) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_DICT_SET_FAILED,
+ "Error setting dictionary");
+ goto unwind;
+ }
- unwind:
- // Updating child_errno with more recent 'events'
- op_errno = afr_final_errno (local, priv);
+ op_errno = afr_final_errno(local, priv);
- AFR_STACK_UNWIND (getxattr, frame, op_ret, op_errno, xattr, xdata);
+ unwind:
+ AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, xattr, xdata);
- if (xattr)
- dict_unref (xattr);
- }
+ if (xattr)
+ dict_unref(xattr);
+ }
- return ret;
+ return ret;
}
/**
* node-uuid cbk uses next child querying mechanism
*/
int32_t
-afr_getxattr_node_uuid_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_getxattr_node_uuid_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- xlator_t **children = NULL;
- int unwind = 1;
- int curr_call_child = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ xlator_t **children = NULL;
+ int unwind = 1;
+ int curr_call_child = 0;
- priv = this->private;
- children = priv->children;
+ priv = this->private;
+ children = priv->children;
- local = frame->local;
+ local = frame->local;
+
+ if (op_ret == -1) { /** query the _next_ child */
+
+ /**
+ * _current_ becomes _next_
+ * If done with all children and still no success, give up.
+ */
+ curr_call_child = (int)((long)cookie);
+ if (++curr_call_child == priv->child_count)
+ goto unwind;
+
+ gf_msg_debug(this->name, op_errno,
+ "op_ret (-1): Re-querying afr-child (%d/%d)",
+ curr_call_child, priv->child_count);
+
+ unwind = 0;
+ STACK_WIND_COOKIE(
+ frame, afr_getxattr_node_uuid_cbk, (void *)(long)curr_call_child,
+ children[curr_call_child],
+ children[curr_call_child]->fops->getxattr, &local->loc,
+ local->cont.getxattr.name, local->xdata_req);
+ }
+
+unwind:
+ if (unwind)
+ AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, dict, xdata);
+
+ return 0;
+}
+
+/**
+ * list-node-uuids cbk returns the list of node_uuids for the subvolume.
+ */
+int32_t
+afr_getxattr_list_node_uuids_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int32_t callcnt = 0;
+ int ret = 0;
+ char *xattr_serz = NULL;
+ long cky = 0;
+ int32_t tlen = 0;
+
+ local = frame->local;
+ priv = this->private;
+ cky = (long)cookie;
+
+ LOCK(&frame->lock);
+ {
+ callcnt = --local->call_count;
+ local->replies[cky].valid = 1;
+ local->replies[cky].op_ret = op_ret;
+ local->replies[cky].op_errno = op_errno;
+
+ if (op_ret < 0)
+ goto unlock;
+
+ local->op_ret = 0;
+
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
+ local->replies[cky].xattr = dict_ref(dict);
+ }
+
+unlock:
+ UNLOCK(&frame->lock);
- if (op_ret == -1) { /** query the _next_ child */
-
- /**
- * _current_ becomes _next_
- * If done with all childs and yet no success; give up !
- */
- curr_call_child = (int) ((long)cookie);
- if (++curr_call_child == priv->child_count)
- goto unwind;
-
- gf_msg_debug (this->name, op_errno,
- "op_ret (-1): Re-querying afr-child (%d/%d)",
- curr_call_child, priv->child_count);
-
- unwind = 0;
- STACK_WIND_COOKIE (frame, afr_getxattr_node_uuid_cbk,
- (void *) (long) curr_call_child,
- children[curr_call_child],
- children[curr_call_child]->fops->getxattr,
- &local->loc,
- local->cont.getxattr.name,
- NULL);
+ if (!callcnt) {
+ if (local->op_ret != 0) {
+ /* All bricks gave an error. */
+ local->op_errno = afr_final_errno(local, priv);
+ goto unwind;
}
- unwind:
- if (unwind)
- AFR_STACK_UNWIND (getxattr, frame, op_ret, op_errno, dict,
- NULL);
+ /* Since we store UUID0_STR as the node uuid for down bricks and for
+ * bricks that returned a non-zero op_ret, size the buffer for
+ * priv->child_count uuids. */
+ local->cont.getxattr.xattr_len = (SLEN(UUID0_STR) + 2) *
+ priv->child_count;
+
+ if (!local->dict)
+ local->dict = dict_new();
+ if (!local->dict) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unwind;
+ }
- return 0;
+ xattr_serz = GF_CALLOC(local->cont.getxattr.xattr_len, sizeof(char),
+ gf_common_mt_char);
+
+ if (!xattr_serz) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ ret = afr_serialize_xattrs_with_delimiter(frame, this, xattr_serz,
+ UUID0_STR, &tlen, ' ');
+ if (ret) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ GF_FREE(xattr_serz);
+ goto unwind;
+ }
+ ret = dict_set_dynstr_sizen(local->dict, GF_XATTR_LIST_NODE_UUIDS_KEY,
+ xattr_serz);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Cannot set node_uuid key in dict");
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ if (ret == -EINVAL)
+ GF_FREE(xattr_serz);
+ } else {
+ local->op_ret = local->cont.getxattr.xattr_len - 1;
+ local->op_errno = 0;
+ }
+
+ unwind:
+ AFR_STACK_UNWIND(getxattr, frame, local->op_ret, local->op_errno,
+ local->dict, local->xdata_rsp);
+ }
+
+ return ret;
}
int32_t
-afr_getxattr_quota_size_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_getxattr_quota_size_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- int idx = (long) cookie;
- int call_count = 0;
- afr_local_t *local = frame->local;
- int read_subvol = -1;
-
- local->replies[idx].valid = 1;
- local->replies[idx].op_ret = op_ret;
- local->replies[idx].op_errno = op_errno;
- if (dict)
- local->replies[idx].xdata = dict_ref (dict);
- call_count = afr_frame_return (frame);
- if (call_count == 0) {
- local->inode = inode_ref (local->loc.inode);
- read_subvol = afr_handle_quota_size (frame, this);
- if (read_subvol != -1) {
- op_ret = local->replies[read_subvol].op_ret;
- op_errno = local->replies[read_subvol].op_errno;
- dict = local->replies[read_subvol].xdata;
- }
- AFR_STACK_UNWIND (getxattr, frame, op_ret, op_errno, dict,
- xdata);
+ int idx = (long)cookie;
+ int call_count = 0;
+ afr_local_t *local = frame->local;
+ int read_subvol = -1;
+
+ local->replies[idx].valid = 1;
+ local->replies[idx].op_ret = op_ret;
+ local->replies[idx].op_errno = op_errno;
+ if (dict)
+ local->replies[idx].xdata = dict_ref(dict);
+ call_count = afr_frame_return(frame);
+ if (call_count == 0) {
+ local->inode = inode_ref(local->loc.inode);
+ read_subvol = afr_handle_quota_size(frame, this);
+ if (read_subvol != -1) {
+ op_ret = local->replies[read_subvol].op_ret;
+ op_errno = local->replies[read_subvol].op_errno;
+ dict = local->replies[read_subvol].xdata;
}
+ AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, dict, xdata);
+ }
- return 0;
+ return 0;
}
int32_t
-afr_getxattr_lockinfo_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_getxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- int call_cnt = 0, len = 0;
- char *lockinfo_buf = NULL;
- dict_t *lockinfo = NULL, *newdict = NULL;
- afr_local_t *local = NULL;
-
- LOCK (&frame->lock);
- {
- local = frame->local;
+ int call_cnt = 0, len = 0;
+ char *lockinfo_buf = NULL;
+ dict_t *lockinfo = NULL, *newdict = NULL;
+ afr_local_t *local = NULL;
- call_cnt = --local->call_count;
+ LOCK(&frame->lock);
+ {
+ local = frame->local;
- if ((op_ret < 0) || (!dict && !xdata)) {
- goto unlock;
- }
+ call_cnt = --local->call_count;
- if (xdata) {
- if (!local->xdata_rsp) {
- local->xdata_rsp = dict_new ();
- if (!local->xdata_rsp) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
- }
- }
- }
+ if ((op_ret < 0) || (!dict && !xdata)) {
+ goto unlock;
+ }
- if (!dict) {
- goto unlock;
+ if (xdata) {
+ if (!local->xdata_rsp) {
+ local->xdata_rsp = dict_new();
+ if (!local->xdata_rsp) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unlock;
}
+ }
+ }
- op_ret = dict_get_ptr_and_len (dict, GF_XATTR_LOCKINFO_KEY,
- (void **)&lockinfo_buf, &len);
+ if (!dict) {
+ goto unlock;
+ }
- if (!lockinfo_buf) {
- goto unlock;
- }
+ op_ret = dict_get_ptr_and_len(dict, GF_XATTR_LOCKINFO_KEY,
+ (void **)&lockinfo_buf, &len);
- if (!local->dict) {
- local->dict = dict_new ();
- if (!local->dict) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
- }
- }
- }
-unlock:
- UNLOCK (&frame->lock);
-
- if (lockinfo_buf != NULL) {
- lockinfo = dict_new ();
- if (lockinfo == NULL) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- } else {
- op_ret = dict_unserialize (lockinfo_buf, len,
- &lockinfo);
-
- if (lockinfo && local->dict) {
- dict_copy (lockinfo, local->dict);
- }
- }
+ if (!lockinfo_buf) {
+ goto unlock;
}
- if (xdata && local->xdata_rsp) {
- dict_copy (xdata, local->xdata_rsp);
+ if (!local->dict) {
+ local->dict = dict_new();
+ if (!local->dict) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unlock;
+ }
}
+ }
+unlock:
+ UNLOCK(&frame->lock);
- if (!call_cnt) {
- newdict = dict_new ();
- if (!newdict) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unwind;
- }
-
- len = dict_serialized_length (local->dict);
- if (len <= 0) {
- goto unwind;
- }
-
- lockinfo_buf = GF_CALLOC (1, len, gf_common_mt_char);
- if (!lockinfo_buf) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unwind;
- }
+ if (lockinfo_buf != NULL) {
+ lockinfo = dict_new();
+ if (lockinfo == NULL) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ } else {
+ op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
- op_ret = dict_serialize (local->dict, lockinfo_buf);
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
- }
+ if (lockinfo && local->dict) {
+ dict_copy(lockinfo, local->dict);
+ }
+ }
+ }
+
+ if (xdata && local->xdata_rsp) {
+ dict_copy(xdata, local->xdata_rsp);
+ }
+
+ if (!call_cnt) {
+ newdict = dict_new();
+ if (!newdict) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unwind;
+ }
- op_ret = dict_set_dynptr (newdict, GF_XATTR_LOCKINFO_KEY,
- (void *)lockinfo_buf, len);
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
- goto unwind;
- }
+ op_ret = dict_allocate_and_serialize(
+ local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
+ if (op_ret != 0) {
+ local->op_ret = -1;
+ goto unwind;
+ }
- unwind:
- AFR_STACK_UNWIND (getxattr, frame, op_ret,
- op_errno, newdict,
- local->xdata_rsp);
+ op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
+ (void *)lockinfo_buf, len);
+ if (op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = -op_ret;
+ goto unwind;
}
- dict_unref (lockinfo);
+ unwind:
+ AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, newdict,
+ local->xdata_rsp);
+ }
- return 0;
+ dict_unref(lockinfo);
+
+ return 0;
}
int32_t
-afr_fgetxattr_lockinfo_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_fgetxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- int call_cnt = 0, len = 0;
- char *lockinfo_buf = NULL;
- dict_t *lockinfo = NULL, *newdict = NULL;
- afr_local_t *local = NULL;
+ int call_cnt = 0, len = 0;
+ char *lockinfo_buf = NULL;
+ dict_t *lockinfo = NULL, *newdict = NULL;
+ afr_local_t *local = NULL;
- LOCK (&frame->lock);
- {
- local = frame->local;
+ LOCK(&frame->lock);
+ {
+ local = frame->local;
- call_cnt = --local->call_count;
+ call_cnt = --local->call_count;
- if ((op_ret < 0) || (!dict && !xdata)) {
- goto unlock;
- }
+ if ((op_ret < 0) || (!dict && !xdata)) {
+ goto unlock;
+ }
- if (xdata) {
- if (!local->xdata_rsp) {
- local->xdata_rsp = dict_new ();
- if (!local->xdata_rsp) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
- }
- }
+ if (xdata) {
+ if (!local->xdata_rsp) {
+ local->xdata_rsp = dict_new();
+ if (!local->xdata_rsp) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unlock;
}
+ }
+ }
- if (!dict) {
- goto unlock;
- }
+ if (!dict) {
+ goto unlock;
+ }
- op_ret = dict_get_ptr_and_len (dict, GF_XATTR_LOCKINFO_KEY,
- (void **)&lockinfo_buf, &len);
+ op_ret = dict_get_ptr_and_len(dict, GF_XATTR_LOCKINFO_KEY,
+ (void **)&lockinfo_buf, &len);
- if (!lockinfo_buf) {
- goto unlock;
- }
-
- if (!local->dict) {
- local->dict = dict_new ();
- if (!local->dict) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
- }
- }
- }
-unlock:
- UNLOCK (&frame->lock);
-
- if (lockinfo_buf != NULL) {
- lockinfo = dict_new ();
- if (lockinfo == NULL) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- } else {
- op_ret = dict_unserialize (lockinfo_buf, len,
- &lockinfo);
-
- if (lockinfo && local->dict) {
- dict_copy (lockinfo, local->dict);
- }
- }
+ if (!lockinfo_buf) {
+ goto unlock;
}
- if (xdata && local->xdata_rsp) {
- dict_copy (xdata, local->xdata_rsp);
+ if (!local->dict) {
+ local->dict = dict_new();
+ if (!local->dict) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unlock;
+ }
}
+ }
+unlock:
+ UNLOCK(&frame->lock);
- if (!call_cnt) {
- newdict = dict_new ();
- if (!newdict) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unwind;
- }
-
- len = dict_serialized_length (local->dict);
- if (len <= 0) {
- goto unwind;
- }
-
- lockinfo_buf = GF_CALLOC (1, len, gf_common_mt_char);
- if (!lockinfo_buf) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unwind;
- }
+ if (lockinfo_buf != NULL) {
+ lockinfo = dict_new();
+ if (lockinfo == NULL) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ } else {
+ op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
- op_ret = dict_serialize (local->dict, lockinfo_buf);
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
- }
+ if (lockinfo && local->dict) {
+ dict_copy(lockinfo, local->dict);
+ }
+ }
+ }
+
+ if (xdata && local->xdata_rsp) {
+ dict_copy(xdata, local->xdata_rsp);
+ }
+
+ if (!call_cnt) {
+ newdict = dict_new();
+ if (!newdict) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto unwind;
+ }
- op_ret = dict_set_dynptr (newdict, GF_XATTR_LOCKINFO_KEY,
- (void *)lockinfo_buf, len);
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
- goto unwind;
- }
+ op_ret = dict_allocate_and_serialize(
+ local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
+ if (op_ret != 0) {
+ local->op_ret = -1;
+ goto unwind;
+ }
- unwind:
- AFR_STACK_UNWIND (fgetxattr, frame, op_ret,
- op_errno, newdict,
- local->xdata_rsp);
+ op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
+ (void *)lockinfo_buf, len);
+ if (op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = -op_ret;
+ goto unwind;
}
- dict_unref (lockinfo);
+ unwind:
+ AFR_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, newdict,
+ local->xdata_rsp);
+ }
- return 0;
+ dict_unref(lockinfo);
+
+ return 0;
}
int32_t
-afr_fgetxattr_pathinfo_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_fgetxattr_pathinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int32_t callcnt = 0;
- int ret = 0;
- char *xattr = NULL;
- char *xattr_serz = NULL;
- char xattr_cky[1024] = {0,};
- dict_t *nxattr = NULL;
- long cky = 0;
- int32_t padding = 0;
- int32_t tlen = 0;
-
- if (!frame || !frame->local || !this) {
- gf_msg ("", GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_ARG, "possible NULL deref");
- goto out;
+ afr_local_t *local = NULL;
+ int32_t callcnt = 0;
+ int ret = 0;
+ char *xattr = NULL;
+ char *xattr_serz = NULL;
+ int keylen = 0;
+ char xattr_cky[1024] = {
+ 0,
+ };
+ int xattr_cky_len = 0;
+ dict_t *nxattr = NULL;
+ long cky = 0;
+ int32_t padding = 0;
+ int32_t tlen = 0;
+
+ if (!frame || !frame->local || !this) {
+ gf_msg("", GF_LOG_ERROR, 0, AFR_MSG_INVALID_ARG, "possible NULL deref");
+ goto out;
+ }
+
+ local = frame->local;
+ cky = (long)cookie;
+ keylen = strlen(local->cont.getxattr.name);
+ xattr_cky_len = snprintf(xattr_cky, sizeof(xattr_cky), "%s-%ld",
+ local->cont.getxattr.name, cky);
+ LOCK(&frame->lock);
+ {
+ callcnt = --local->call_count;
+
+ if (op_ret < 0) {
+ local->op_errno = op_errno;
+ } else {
+ local->op_ret = op_ret;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
}
- local = frame->local;
- cky = (long) cookie;
-
- LOCK (&frame->lock);
- {
- callcnt = --local->call_count;
-
- if (op_ret < 0) {
- local->op_errno = op_errno;
- } else {
- local->op_ret = op_ret;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
+ if (!dict || (op_ret < 0))
+ goto unlock;
- if (!dict || (op_ret < 0))
- goto unlock;
-
- if (!local->dict)
- local->dict = dict_new ();
-
- if (local->dict) {
- ret = dict_get_str (dict,
- local->cont.getxattr.name,
- &xattr);
- if (ret)
- goto unlock;
-
- xattr = gf_strdup (xattr);
-
- (void)snprintf (xattr_cky, 1024, "%s-%ld",
- local->cont.getxattr.name, cky);
- ret = dict_set_dynstr (local->dict,
- xattr_cky, xattr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Cannot set xattr cookie key");
- goto unlock;
- }
-
- local->cont.getxattr.xattr_len
- += strlen (xattr) + 1;
- }
+ if (!local->dict) {
+ local->dict = dict_new();
+ if (!local->dict)
+ goto unlock;
+ }
+ ret = dict_get_strn(dict, local->cont.getxattr.name, keylen, &xattr);
+ if (ret)
+ goto unlock;
+
+ xattr = gf_strdup(xattr);
+
+ ret = dict_set_dynstrn(local->dict, xattr_cky, xattr_cky_len, xattr);
+ if (ret) {
+ UNLOCK(&frame->lock);
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Cannot set xattr cookie key");
+ goto post_unlock;
}
-unlock:
- UNLOCK (&frame->lock);
-
- if (!callcnt) {
- if (!local->cont.getxattr.xattr_len)
- goto unwind;
-
- nxattr = dict_new ();
- if (!nxattr)
- goto unwind;
-
- /* extra bytes for decorations (brackets and <>'s) */
- padding += strlen (this->name)
- + strlen (AFR_PATHINFO_HEADER) + 4;
- local->cont.getxattr.xattr_len += (padding + 2);
-
- xattr_serz = GF_CALLOC (local->cont.getxattr.xattr_len,
- sizeof (char), gf_common_mt_char);
-
- if (!xattr_serz)
- goto unwind;
-
- /* the xlator info */
- (void) sprintf (xattr_serz, "(<"AFR_PATHINFO_HEADER"%s> ",
- this->name);
-
- /* actual series of pathinfo */
- ret = dict_serialize_value_with_delim (local->dict,
- xattr_serz
- + strlen (xattr_serz),
- &tlen, ' ');
- if (ret) {
- goto unwind;
- }
- /* closing part */
- *(xattr_serz + padding + tlen) = ')';
- *(xattr_serz + padding + tlen + 1) = '\0';
+ local->cont.getxattr.xattr_len += strlen(xattr) + 1;
+ }
+unlock:
+ UNLOCK(&frame->lock);
+post_unlock:
+ if (!callcnt) {
+ if (!local->cont.getxattr.xattr_len)
+ goto unwind;
+
+ nxattr = dict_new();
+ if (!nxattr)
+ goto unwind;
+
+ /* extra bytes for decorations (brackets and <>'s) */
+ padding += strlen(this->name) + SLEN(AFR_PATHINFO_HEADER) + 4;
+ local->cont.getxattr.xattr_len += (padding + 2);
+
+ xattr_serz = GF_MALLOC(local->cont.getxattr.xattr_len,
+ gf_common_mt_char);
+
+ if (!xattr_serz)
+ goto unwind;
+
+ /* the xlator info */
+ int xattr_serz_len = sprintf(
+ xattr_serz, "(<" AFR_PATHINFO_HEADER "%s> ", this->name);
+
+ /* actual series of pathinfo */
+ ret = dict_serialize_value_with_delim(
+ local->dict, xattr_serz + xattr_serz_len, &tlen, ' ');
+ if (ret) {
+ GF_FREE(xattr_serz);
+ goto unwind;
+ }
- ret = dict_set_dynstr (nxattr, local->cont.getxattr.name,
- xattr_serz);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Cannot set pathinfo key in dict");
+ /* closing part */
+ *(xattr_serz + padding + tlen) = ')';
+ *(xattr_serz + padding + tlen + 1) = '\0';
+
+ ret = dict_set_dynstrn(nxattr, local->cont.getxattr.name, keylen,
+ xattr_serz);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Cannot set pathinfo key in dict");
+ if (ret == -EINVAL)
+ GF_FREE(xattr_serz);
+ }
- unwind:
- AFR_STACK_UNWIND (fgetxattr, frame, local->op_ret,
- local->op_errno, nxattr, local->xdata_rsp);
+ unwind:
+ AFR_STACK_UNWIND(fgetxattr, frame, local->op_ret, local->op_errno,
+ nxattr, local->xdata_rsp);
- if (nxattr)
- dict_unref (nxattr);
- }
+ if (nxattr)
+ dict_unref(nxattr);
+ }
out:
- return ret;
+ return ret;
}
int32_t
-afr_getxattr_pathinfo_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_getxattr_pathinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int32_t callcnt = 0;
- int ret = 0;
- char *xattr = NULL;
- char *xattr_serz = NULL;
- char xattr_cky[1024] = {0,};
- dict_t *nxattr = NULL;
- long cky = 0;
- int32_t padding = 0;
- int32_t tlen = 0;
-
- if (!frame || !frame->local || !this) {
- gf_msg ("", GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_ARG, "possible NULL deref");
- goto out;
- }
+ afr_local_t *local = NULL;
+ int32_t callcnt = 0;
+ int ret = 0;
+ char *xattr = NULL;
+ char *xattr_serz = NULL;
+ char xattr_cky[1024] = {
+ 0,
+ };
+ int keylen = 0;
+ int xattr_cky_len = 0;
+ dict_t *nxattr = NULL;
+ long cky = 0;
+ int32_t padding = 0;
+ int32_t tlen = 0;
+
+ if (!frame || !frame->local || !this) {
+ gf_msg("", GF_LOG_ERROR, 0, AFR_MSG_INVALID_ARG, "possible NULL deref");
+ goto out;
+ }
+
+ local = frame->local;
+ cky = (long)cookie;
+ keylen = strlen(local->cont.getxattr.name);
+ xattr_cky_len = snprintf(xattr_cky, sizeof(xattr_cky), "%s-%ld",
+ local->cont.getxattr.name, cky);
+ LOCK(&frame->lock);
+ {
+ callcnt = --local->call_count;
- local = frame->local;
- cky = (long) cookie;
-
- LOCK (&frame->lock);
- {
- callcnt = --local->call_count;
-
- if (op_ret < 0) {
- local->op_errno = op_errno;
- } else {
- local->op_ret = op_ret;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
-
- if (!dict || (op_ret < 0))
- goto unlock;
-
- if (!local->dict)
- local->dict = dict_new ();
-
- if (local->dict) {
- ret = dict_get_str (dict,
- local->cont.getxattr.name,
- &xattr);
- if (ret)
- goto unlock;
-
- xattr = gf_strdup (xattr);
-
- (void)snprintf (xattr_cky, 1024, "%s-%ld",
- local->cont.getxattr.name, cky);
- ret = dict_set_dynstr (local->dict,
- xattr_cky, xattr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret,
- AFR_MSG_DICT_SET_FAILED,
- "Cannot set xattr "
- "cookie key");
- goto unlock;
- }
-
- local->cont.getxattr.xattr_len += strlen (xattr) + 1;
- }
- }
-unlock:
- UNLOCK (&frame->lock);
-
- if (!callcnt) {
- if (!local->cont.getxattr.xattr_len)
- goto unwind;
-
- nxattr = dict_new ();
- if (!nxattr)
- goto unwind;
-
- /* extra bytes for decorations (brackets and <>'s) */
- padding += strlen (this->name) + strlen (AFR_PATHINFO_HEADER) + 4;
- local->cont.getxattr.xattr_len += (padding + 2);
-
- xattr_serz = GF_CALLOC (local->cont.getxattr.xattr_len,
- sizeof (char), gf_common_mt_char);
-
- if (!xattr_serz)
- goto unwind;
+ if (op_ret < 0) {
+ local->op_errno = op_errno;
+ } else {
+ local->op_ret = op_ret;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
+ }
- /* the xlator info */
- (void) sprintf (xattr_serz, "(<"AFR_PATHINFO_HEADER"%s> ",
- this->name);
+ if (!dict || (op_ret < 0))
+ goto unlock;
- /* actual series of pathinfo */
- ret = dict_serialize_value_with_delim (local->dict,
- xattr_serz + strlen (xattr_serz),
- &tlen, ' ');
- if (ret) {
- goto unwind;
- }
+ if (!local->dict) {
+ local->dict = dict_new();
+ if (!local->dict)
+ goto unlock;
+ }
+ ret = dict_get_strn(dict, local->cont.getxattr.name, keylen, &xattr);
+ if (ret)
+ goto unlock;
+
+ xattr = gf_strdup(xattr);
+
+ ret = dict_set_dynstrn(local->dict, xattr_cky, xattr_cky_len, xattr);
+ if (ret) {
+ UNLOCK(&frame->lock);
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Cannot set xattr cookie key");
+ goto post_unlock;
+ }
- /* closing part */
- *(xattr_serz + padding + tlen) = ')';
- *(xattr_serz + padding + tlen + 1) = '\0';
+ local->cont.getxattr.xattr_len += strlen(xattr) + 1;
+ }
+unlock:
+ UNLOCK(&frame->lock);
+post_unlock:
+ if (!callcnt) {
+ if (!local->cont.getxattr.xattr_len)
+ goto unwind;
+
+ nxattr = dict_new();
+ if (!nxattr)
+ goto unwind;
+
+ /* extra bytes for decorations (brackets and <>'s) */
+ padding += strlen(this->name) + SLEN(AFR_PATHINFO_HEADER) + 4;
+ local->cont.getxattr.xattr_len += (padding + 2);
+
+ xattr_serz = GF_MALLOC(local->cont.getxattr.xattr_len,
+ gf_common_mt_char);
+
+ if (!xattr_serz)
+ goto unwind;
+
+ /* the xlator info */
+ int xattr_serz_len = sprintf(
+ xattr_serz, "(<" AFR_PATHINFO_HEADER "%s> ", this->name);
+
+ /* actual series of pathinfo */
+ ret = dict_serialize_value_with_delim(
+ local->dict, xattr_serz + xattr_serz_len, &tlen, ' ');
+ if (ret) {
+ GF_FREE(xattr_serz);
+ goto unwind;
+ }
- ret = dict_set_dynstr (nxattr, local->cont.getxattr.name,
- xattr_serz);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Cannot set pathinfo key in dict");
+ /* closing part */
+ *(xattr_serz + padding + tlen) = ')';
+ *(xattr_serz + padding + tlen + 1) = '\0';
+
+ ret = dict_set_dynstrn(nxattr, local->cont.getxattr.name, keylen,
+ xattr_serz);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Cannot set pathinfo key in dict");
+ if (ret == -EINVAL)
+ GF_FREE(xattr_serz);
+ }
- unwind:
- AFR_STACK_UNWIND (getxattr, frame, local->op_ret,
- local->op_errno, nxattr, local->xdata_rsp);
+ unwind:
+ AFR_STACK_UNWIND(getxattr, frame, local->op_ret, local->op_errno,
+ nxattr, local->xdata_rsp);
- if (nxattr)
- dict_unref (nxattr);
- }
+ if (nxattr)
+ dict_unref(nxattr);
+ }
out:
- return ret;
+ return ret;
}
static int
-afr_aggregate_stime_xattr (dict_t *this, char *key, data_t *value, void *data)
+afr_aggregate_stime_xattr(dict_t *this, char *key, data_t *value, void *data)
{
- int ret = 0;
+ int ret = 0;
- if (fnmatch (GF_XATTR_STIME_PATTERN, key, FNM_NOESCAPE) == 0)
- ret = gf_get_max_stime (THIS, data, key, value);
+ if (fnmatch(GF_XATTR_STIME_PATTERN, key, FNM_NOESCAPE) == 0)
+ ret = gf_get_max_stime(THIS, data, key, value);
- return ret;
+ return ret;
}
int32_t
-afr_common_getxattr_stime_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_common_getxattr_stime_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int32_t callcnt = 0;
+ afr_local_t *local = NULL;
+ int32_t callcnt = 0;
- if (!frame || !frame->local || !this) {
- gf_msg ("", GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_ARG, "possible NULL deref");
- goto out;
- }
+ if (!frame || !frame->local || !this) {
+ gf_msg("", GF_LOG_ERROR, 0, AFR_MSG_INVALID_ARG, "possible NULL deref");
+ goto out;
+ }
- local = frame->local;
+ local = frame->local;
- LOCK (&frame->lock);
- {
- callcnt = --local->call_count;
+ LOCK(&frame->lock);
+ {
+ callcnt = --local->call_count;
- if (!dict || (op_ret < 0)) {
- local->op_errno = op_errno;
- goto cleanup;
- }
-
- if (!local->dict)
- local->dict = dict_copy_with_ref (dict, NULL);
- else
- dict_foreach (dict, afr_aggregate_stime_xattr,
- local->dict);
- local->op_ret = 0;
+ if (!dict || (op_ret < 0)) {
+ local->op_errno = op_errno;
+ goto cleanup;
}
+ if (!local->dict)
+ local->dict = dict_copy_with_ref(dict, NULL);
+ else
+ dict_foreach(dict, afr_aggregate_stime_xattr, local->dict);
+ local->op_ret = 0;
+ }
+
cleanup:
- UNLOCK (&frame->lock);
+ UNLOCK(&frame->lock);
- if (!callcnt) {
- AFR_STACK_UNWIND (getxattr, frame, local->op_ret,
- local->op_errno, local->dict, xdata);
- }
+ if (!callcnt) {
+ AFR_STACK_UNWIND(getxattr, frame, local->op_ret, local->op_errno,
+ local->dict, xdata);
+ }
out:
- return 0;
+ return 0;
}
-
static gf_boolean_t
-afr_is_special_xattr (const char *name, fop_getxattr_cbk_t *cbk,
- gf_boolean_t is_fgetxattr)
+afr_is_special_xattr(const char *name, fop_getxattr_cbk_t *cbk,
+ gf_boolean_t is_fgetxattr)
{
- gf_boolean_t is_spl = _gf_true;
-
- GF_ASSERT (cbk);
- if (!cbk || !name) {
- is_spl = _gf_false;
- goto out;
+ gf_boolean_t is_spl = _gf_true;
+
+ GF_ASSERT(cbk);
+ if (!cbk || !name) {
+ is_spl = _gf_false;
+ goto out;
+ }
+
+ if (!strcmp(name, GF_XATTR_PATHINFO_KEY) ||
+ !strcmp(name, GF_XATTR_USER_PATHINFO_KEY)) {
+ if (is_fgetxattr) {
+ *cbk = afr_fgetxattr_pathinfo_cbk;
+ } else {
+ *cbk = afr_getxattr_pathinfo_cbk;
}
-
- if (!strcmp (name, GF_XATTR_PATHINFO_KEY) ||
- !strcmp (name, GF_XATTR_USER_PATHINFO_KEY)) {
- if (is_fgetxattr) {
- *cbk = afr_fgetxattr_pathinfo_cbk;
- } else {
- *cbk = afr_getxattr_pathinfo_cbk;
- }
- } else if (!strncmp (name, GF_XATTR_CLRLK_CMD,
- strlen (GF_XATTR_CLRLK_CMD))) {
- if (is_fgetxattr) {
- *cbk = afr_fgetxattr_clrlk_cbk;
- } else {
- *cbk = afr_getxattr_clrlk_cbk;
- }
- } else if (!strncmp (name, GF_XATTR_LOCKINFO_KEY,
- strlen (GF_XATTR_LOCKINFO_KEY))) {
- if (is_fgetxattr) {
- *cbk = afr_fgetxattr_lockinfo_cbk;
- } else {
- *cbk = afr_getxattr_lockinfo_cbk;
- }
- } else if (fnmatch (GF_XATTR_STIME_PATTERN, name, FNM_NOESCAPE) == 0) {
- *cbk = afr_common_getxattr_stime_cbk;
- } else if (strcmp (name, QUOTA_SIZE_KEY) == 0) {
- *cbk = afr_getxattr_quota_size_cbk;
+ } else if (!strncmp(name, GF_XATTR_CLRLK_CMD, SLEN(GF_XATTR_CLRLK_CMD))) {
+ if (is_fgetxattr) {
+ *cbk = afr_fgetxattr_clrlk_cbk;
} else {
- is_spl = _gf_false;
+ *cbk = afr_getxattr_clrlk_cbk;
}
+ } else if (!strncmp(name, GF_XATTR_LOCKINFO_KEY,
+ SLEN(GF_XATTR_LOCKINFO_KEY))) {
+ if (is_fgetxattr) {
+ *cbk = afr_fgetxattr_lockinfo_cbk;
+ } else {
+ *cbk = afr_getxattr_lockinfo_cbk;
+ }
+ } else if (fnmatch(GF_XATTR_STIME_PATTERN, name, FNM_NOESCAPE) == 0) {
+ *cbk = afr_common_getxattr_stime_cbk;
+ } else if (strcmp(name, QUOTA_SIZE_KEY) == 0) {
+ *cbk = afr_getxattr_quota_size_cbk;
+ } else if (!strcmp(name, GF_XATTR_LIST_NODE_UUIDS_KEY)) {
+ *cbk = afr_getxattr_list_node_uuids_cbk;
+ } else {
+ is_spl = _gf_false;
+ }
out:
- return is_spl;
+ return is_spl;
}
static void
-afr_getxattr_all_subvols (xlator_t *this, call_frame_t *frame,
- const char *name, loc_t *loc,
- fop_getxattr_cbk_t cbk)
+afr_getxattr_all_subvols(xlator_t *this, call_frame_t *frame, const char *name,
+ loc_t *loc, fop_getxattr_cbk_t cbk)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int call_count = 0;
-
- priv = this->private;
-
- local = frame->local;
- //local->call_count set in afr_local_init
- call_count = local->call_count;
-
- //If up-children count is 0, afr_local_init would have failed already
- //and the call would have unwound so not handling it here.
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, cbk,
- (void *) (long) i, priv->children[i],
- priv->children[i]->fops->getxattr,
- loc, name, NULL);
- if (!--call_count)
- break;
- }
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+ int call_count = 0;
+
+ priv = this->private;
+
+ local = frame->local;
+ // local->call_count set in afr_local_init
+ call_count = local->call_count;
+
+ if (!strcmp(name, GF_XATTR_LIST_NODE_UUIDS_KEY)) {
+ GF_FREE(local->cont.getxattr.name);
+ local->cont.getxattr.name = gf_strdup(GF_XATTR_NODE_UUID_KEY);
+ }
+
+ // If up-children count is 0, afr_local_init would have failed already
+ // and the call would have unwound so not handling it here.
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(frame, cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->getxattr, loc,
+ local->cont.getxattr.name, NULL);
+ if (!--call_count)
+ break;
}
- return;
+ }
+ return;
}
int
-afr_marker_populate_args (call_frame_t *frame, int type, int *gauge,
- xlator_t **subvols)
+afr_marker_populate_args(call_frame_t *frame, int type, int *gauge,
+ xlator_t **subvols)
{
- xlator_t *this = frame->this;
- afr_private_t *priv = this->private;
+ xlator_t *this = frame->this;
+ afr_private_t *priv = this->private;
- memcpy (subvols, priv->children, sizeof (*subvols) * priv->child_count);
+ memcpy(subvols, priv->children, sizeof(*subvols) * priv->child_count);
- if (type == MARKER_XTIME_TYPE) {
- /*Don't error out on ENOENT/ENOTCONN */
- gauge[MCNT_NOTFOUND] = 0;
- gauge[MCNT_ENOTCONN] = 0;
- }
- return priv->child_count;
+ if (type == MARKER_XTIME_TYPE) {
+ /*Don't error out on ENOENT/ENOTCONN */
+ gauge[MCNT_NOTFOUND] = 0;
+ gauge[MCNT_ENOTCONN] = 0;
+ }
+ return priv->child_count;
}
static int
-afr_handle_heal_xattrs (call_frame_t *frame, xlator_t *this, loc_t *loc,
- const char *heal_op)
+afr_handle_heal_xattrs(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *heal_op)
{
- int ret = -1;
- afr_spb_status_t *data = NULL;
+ int ret = -1;
+ afr_spb_status_t *data = NULL;
- if (!strcmp (heal_op, GF_HEAL_INFO)) {
- afr_get_heal_info (frame, this, loc);
- ret = 0;
- goto out;
- }
+ if (!strcmp(heal_op, GF_HEAL_INFO)) {
+ afr_get_heal_info(frame, this, loc);
+ ret = 0;
+ goto out;
+ }
- if (!strcmp (heal_op, GF_AFR_HEAL_SBRAIN)) {
- afr_heal_splitbrain_file (frame, this, loc);
- ret = 0;
- goto out;
+ if (!strcmp(heal_op, GF_AFR_HEAL_SBRAIN)) {
+ afr_heal_splitbrain_file(frame, this, loc);
+ ret = 0;
+ goto out;
+ }
+
+ if (!strcmp(heal_op, GF_AFR_SBRAIN_STATUS)) {
+ data = GF_CALLOC(1, sizeof(*data), gf_afr_mt_spb_status_t);
+ if (!data) {
+ ret = 1;
+ goto out;
}
-
- if (!strcmp (heal_op, GF_AFR_SBRAIN_STATUS)) {
- data = GF_CALLOC (1, sizeof (*data), gf_afr_mt_spb_status_t);
- if (!data) {
- ret = 1;
- goto out;
- }
- data->frame = frame;
- data->loc = loc;
- ret = synctask_new (this->ctx->env,
- afr_get_split_brain_status,
- afr_get_split_brain_status_cbk,
- NULL, data);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SPLIT_BRAIN_STATUS,
- "Failed to create"
- " synctask. Unable to fetch split-brain status"
- " for %s.", loc->name);
- ret = 1;
- goto out;
- }
- goto out;
+ data->frame = frame;
+ data->loc = loc;
+ ret = synctask_new(this->ctx->env, afr_get_split_brain_status,
+ afr_get_split_brain_status_cbk, NULL, data);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN_STATUS,
+ "Failed to create"
+ " synctask. Unable to fetch split-brain status"
+ " for %s.",
+ loc->name);
+ ret = 1;
+ goto out;
}
+ goto out;
+ }
out:
- if (ret == 1) {
- AFR_STACK_UNWIND (getxattr, frame, -1, ENOMEM, NULL, NULL);
- if (data)
- GF_FREE (data);
- ret = 0;
- }
- return ret;
+ if (ret == 1) {
+ AFR_STACK_UNWIND(getxattr, frame, -1, ENOMEM, NULL, NULL);
+ if (data)
+ GF_FREE(data);
+ ret = 0;
+ }
+ return ret;
}
int32_t
-afr_getxattr (call_frame_t *frame, xlator_t *this,
- loc_t *loc, const char *name, dict_t *xdata)
+afr_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- xlator_t **children = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int32_t op_errno = 0;
- int ret = -1;
- fop_getxattr_cbk_t cbk = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ xlator_t **children = NULL;
+ int i = 0;
+ int32_t op_errno = 0;
+ int ret = -1;
+ fop_getxattr_cbk_t cbk = NULL;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ priv = this->private;
- priv = this->private;
+ children = priv->children;
- children = priv->children;
+ loc_copy(&local->loc, loc);
- loc_copy (&local->loc, loc);
+ local->op = GF_FOP_GETXATTR;
- local->op = GF_FOP_GETXATTR;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ if (!name)
+ goto no_name;
- if (!name)
- goto no_name;
+ local->cont.getxattr.name = gf_strdup(name);
- local->cont.getxattr.name = gf_strdup (name);
+ if (!local->cont.getxattr.name) {
+ op_errno = ENOMEM;
+ goto out;
+ }
- if (!local->cont.getxattr.name) {
- op_errno = ENOMEM;
- goto out;
- }
+ if (!strncmp(name, AFR_XATTR_PREFIX, SLEN(AFR_XATTR_PREFIX))) {
+ op_errno = ENODATA;
+ goto out;
+ }
- if (!strncmp (name, AFR_XATTR_PREFIX,
- strlen (AFR_XATTR_PREFIX))) {
- op_errno = ENODATA;
- goto out;
- }
-
- if (cluster_handle_marker_getxattr (frame, loc, name, priv->vol_uuid,
- afr_getxattr_unwind,
- afr_marker_populate_args) == 0)
- return 0;
+ if (cluster_handle_marker_getxattr(frame, loc, name, priv->vol_uuid,
+ afr_getxattr_unwind,
+ afr_marker_populate_args) == 0)
+ return 0;
- ret = afr_handle_heal_xattrs (frame, this, &local->loc, name);
- if (ret == 0)
- return 0;
+ ret = afr_handle_heal_xattrs(frame, this, &local->loc, name);
+ if (ret == 0)
+ return 0;
- /*
- * Special xattrs which need responses from all subvols
- */
- if (afr_is_special_xattr (name, &cbk, 0)) {
- afr_getxattr_all_subvols (this, frame, name, loc, cbk);
- return 0;
- }
+ /*
+ * Heal daemons don't have IO threads ... and as a result they
+ * send this getxattr down and eventually crash :(
+ */
+ op_errno = -1;
+ GF_CHECK_XATTR_KEY_AND_GOTO(name, IO_THREADS_QUEUE_SIZE_KEY, op_errno, out);
+
+ /*
+ * Special xattrs which need responses from all subvols
+ */
+ if (afr_is_special_xattr(name, &cbk, 0)) {
+ afr_getxattr_all_subvols(this, frame, name, loc, cbk);
+ return 0;
+ }
- if (XATTR_IS_NODE_UUID (name)) {
- i = 0;
- STACK_WIND_COOKIE (frame, afr_getxattr_node_uuid_cbk,
- (void *) (long) i,
- children[i],
- children[i]->fops->getxattr,
- loc, name, xdata);
- return 0;
- }
+ if (XATTR_IS_NODE_UUID(name)) {
+ i = 0;
+ STACK_WIND_COOKIE(frame, afr_getxattr_node_uuid_cbk, (void *)(long)i,
+ children[i], children[i]->fops->getxattr, loc, name,
+ xdata);
+ return 0;
+ }
no_name:
- afr_read_txn (frame, this, local->loc.inode, afr_getxattr_wind,
- AFR_METADATA_TRANSACTION);
+ afr_read_txn(frame, this, local->loc.inode, afr_getxattr_wind,
+ AFR_METADATA_TRANSACTION);
- ret = 0;
+ ret = 0;
out:
- if (ret < 0)
- AFR_STACK_UNWIND (getxattr, frame, -1, op_errno, NULL, NULL);
- return 0;
+ if (ret < 0)
+ AFR_STACK_UNWIND(getxattr, frame, -1, op_errno, NULL, NULL);
+ return 0;
}
/* {{{ fgetxattr */
-
int32_t
-afr_fgetxattr_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *dict, dict_t *xdata)
+afr_fgetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = op_errno;
+ if (op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- if (dict)
- afr_filter_xattrs (dict);
+ if (dict)
+ afr_filter_xattrs(dict);
- AFR_STACK_UNWIND (fgetxattr, frame, op_ret, op_errno, dict, xdata);
+ AFR_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, dict, xdata);
- return 0;
+ return 0;
}
int
-afr_fgetxattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_fgetxattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (fgetxattr, frame, local->op_ret,
- local->op_errno, NULL, NULL);
- return 0;
- }
-
- STACK_WIND_COOKIE (frame, afr_fgetxattr_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->fgetxattr,
- local->fd, local->cont.getxattr.name,
- local->xdata_req);
- return 0;
-}
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ local = frame->local;
+ priv = this->private;
-static void
-afr_fgetxattr_all_subvols (xlator_t *this, call_frame_t *frame,
- fop_fgetxattr_cbk_t cbk)
-{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
- int call_count = 0;
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(fgetxattr, frame, local->op_ret, local->op_errno, NULL,
+ NULL);
+ return 0;
+ }
- priv = this->private;
+ STACK_WIND_COOKIE(frame, afr_fgetxattr_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->fgetxattr, local->fd,
+ local->cont.getxattr.name, local->xdata_req);
+ return 0;
+}
- local = frame->local;
- //local->call_count set in afr_local_init
- call_count = local->call_count;
-
- //If up-children count is 0, afr_local_init would have failed already
- //and the call would have unwound so not handling it here.
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->fgetxattr,
- local->fd, local->cont.getxattr.name,
- NULL);
- if (!--call_count)
- break;
- }
+static void
+afr_fgetxattr_all_subvols(xlator_t *this, call_frame_t *frame,
+ fop_fgetxattr_cbk_t cbk)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+ int call_count = 0;
+
+ priv = this->private;
+
+ local = frame->local;
+ // local->call_count set in afr_local_init
+ call_count = local->call_count;
+
+ // If up-children count is 0, afr_local_init would have failed already
+ // and the call would have unwound so not handling it here.
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(frame, cbk, (void *)(long)i, priv->children[i],
+ priv->children[i]->fops->fgetxattr, local->fd,
+ local->cont.getxattr.name, NULL);
+ if (!--call_count)
+ break;
}
+ }
- return;
+ return;
}
-
int
-afr_fgetxattr (call_frame_t *frame, xlator_t *this,
- fd_t *fd, const char *name, dict_t *xdata)
+afr_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int32_t op_errno = 0;
- fop_fgetxattr_cbk_t cbk = NULL;
-
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
-
- local->op = GF_FOP_FGETXATTR;
- local->fd = fd_ref (fd);
- if (name) {
- local->cont.getxattr.name = gf_strdup (name);
- if (!local->cont.getxattr.name) {
- op_errno = ENOMEM;
- goto out;
- }
- }
- if (xdata)
- local->xdata_req = dict_ref (xdata);
-
- /* pathinfo gets handled only in getxattr(), but we need to handle
- * lockinfo.
- * If we are doing fgetxattr with lockinfo as the key then we
- * collect information from all children.
- */
- if (afr_is_special_xattr (name, &cbk, 1)) {
- afr_fgetxattr_all_subvols (this, frame, cbk);
- return 0;
+ afr_local_t *local = NULL;
+ int32_t op_errno = 0;
+ fop_fgetxattr_cbk_t cbk = NULL;
+
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_FGETXATTR;
+ local->fd = fd_ref(fd);
+ if (name) {
+ local->cont.getxattr.name = gf_strdup(name);
+ if (!local->cont.getxattr.name) {
+ op_errno = ENOMEM;
+ goto out;
}
+ }
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+
+ /* pathinfo gets handled only in getxattr(), but we need to handle
+ * lockinfo.
+ * If we are doing fgetxattr with lockinfo as the key then we
+ * collect information from all children.
+ */
+ if (afr_is_special_xattr(name, &cbk, 1)) {
+ afr_fgetxattr_all_subvols(this, frame, cbk);
+ return 0;
+ }
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- afr_read_txn (frame, this, fd->inode, afr_fgetxattr_wind,
- AFR_METADATA_TRANSACTION);
+ afr_read_txn(frame, this, fd->inode, afr_fgetxattr_wind,
+ AFR_METADATA_TRANSACTION);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (fgetxattr, frame, -1, op_errno, NULL, NULL);
+ AFR_STACK_UNWIND(fgetxattr, frame, -1, op_errno, NULL, NULL);
- return 0;
+ return 0;
}
-
/* }}} */
/* {{{ readv */
int
-afr_readv_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- struct iovec *vector, int32_t count, struct iatt *buf,
- struct iobref *iobref, dict_t *xdata)
+afr_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iovec *vector, int32_t count,
+ struct iatt *buf, struct iobref *iobref, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = op_errno;
+ if (op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = op_errno;
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ afr_read_txn_continue(frame, this, (long)cookie);
+ return 0;
+ }
- AFR_STACK_UNWIND (readv, frame, op_ret, op_errno,
- vector, count, buf, iobref, xdata);
- return 0;
+ AFR_STACK_UNWIND(readv, frame, op_ret, op_errno, vector, count, buf, iobref,
+ xdata);
+ return 0;
}
-
int
-afr_readv_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_readv_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
-
- if (subvol == -1) {
- AFR_STACK_UNWIND (readv, frame, local->op_ret, local->op_errno,
- 0, 0, 0, 0, 0);
- return 0;
- }
-
- STACK_WIND_COOKIE (frame, afr_readv_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->readv,
- local->fd, local->cont.readv.size,
- local->cont.readv.offset, local->cont.readv.flags,
- local->xdata_req);
- return 0;
-}
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(readv, frame, local->op_ret, local->op_errno, 0, 0, 0,
+ 0, 0);
+ return 0;
+ }
+ STACK_WIND_COOKIE(
+ frame, afr_readv_cbk, (void *)(long)subvol, priv->children[subvol],
+ priv->children[subvol]->fops->readv, local->fd, local->cont.readv.size,
+ local->cont.readv.offset, local->cont.readv.flags, local->xdata_req);
+ return 0;
+}
int
-afr_readv (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
- off_t offset, uint32_t flags, dict_t *xdata)
+afr_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
{
- afr_local_t * local = NULL;
- int32_t op_errno = 0;
+ afr_local_t *local = NULL;
+ int32_t op_errno = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local->op = GF_FOP_READ;
- local->fd = fd_ref (fd);
- local->cont.readv.size = size;
- local->cont.readv.offset = offset;
- local->cont.readv.flags = flags;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->op = GF_FOP_READ;
+ local->fd = fd_ref(fd);
+ local->cont.readv.size = size;
+ local->cont.readv.offset = offset;
+ local->cont.readv.flags = flags;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- afr_read_txn (frame, this, fd->inode, afr_readv_wind,
- AFR_DATA_TRANSACTION);
+ afr_read_txn(frame, this, fd->inode, afr_readv_wind, AFR_DATA_TRANSACTION);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND(readv, frame, -1, op_errno, 0, 0, 0, 0, 0);
+ AFR_STACK_UNWIND(readv, frame, -1, op_errno, 0, 0, 0, 0, 0);
- return 0;
+ return 0;
}
/* }}} */
@@ -1792,77 +1822,73 @@ out:
/* {{{ seek */
int
-afr_seek_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, off_t offset, dict_t *xdata)
+afr_seek_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, off_t offset, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = op_errno;
-
- afr_read_txn_continue (frame, this, (long) cookie);
- return 0;
- }
+ if (op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = op_errno;
- AFR_STACK_UNWIND (seek, frame, op_ret, op_errno, offset, xdata);
+ afr_read_txn_continue(frame, this, (long)cookie);
return 0;
-}
+ }
+ AFR_STACK_UNWIND(seek, frame, op_ret, op_errno, offset, xdata);
+ return 0;
+}
int
-afr_seek_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_seek_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- if (subvol == -1) {
- AFR_STACK_UNWIND (seek, frame, local->op_ret, local->op_errno,
- 0, NULL);
- return 0;
- }
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_seek_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->seek,
- local->fd, local->cont.seek.offset,
- local->cont.seek.what, local->xdata_req);
+ if (subvol == -1) {
+ AFR_STACK_UNWIND(seek, frame, local->op_ret, local->op_errno, 0, NULL);
return 0;
-}
+ }
+ STACK_WIND_COOKIE(
+ frame, afr_seek_cbk, (void *)(long)subvol, priv->children[subvol],
+ priv->children[subvol]->fops->seek, local->fd, local->cont.seek.offset,
+ local->cont.seek.what, local->xdata_req);
+ return 0;
+}
int
-afr_seek (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
- gf_seek_what_t what, dict_t *xdata)
+afr_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ gf_seek_what_t what, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int32_t op_errno = 0;
+ afr_local_t *local = NULL;
+ int32_t op_errno = 0;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local->op = GF_FOP_SEEK;
- local->fd = fd_ref (fd);
- local->cont.seek.offset = offset;
- local->cont.seek.what = what;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->op = GF_FOP_SEEK;
+ local->fd = fd_ref(fd);
+ local->cont.seek.offset = offset;
+ local->cont.seek.what = what;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- afr_read_txn (frame, this, fd->inode, afr_seek_wind,
- AFR_DATA_TRANSACTION);
+ afr_read_txn(frame, this, fd->inode, afr_seek_wind, AFR_DATA_TRANSACTION);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (seek, frame, -1, op_errno, 0, NULL);
+ AFR_STACK_UNWIND(seek, frame, -1, op_errno, 0, NULL);
- return 0;
+ return 0;
}
/* }}} */
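
The readv, seek, and getxattr paths reformatted above all share one read-transaction shape: the fop stashes its arguments in local->cont, hands a wind function to afr_read_txn(), and the callback falls back to the next subvolume via afr_read_txn_continue() whenever a brick replies with an error. The sketch below is a self-contained model of just that fail-over loop; REPLICA_COUNT, read_one(), and replica_read() are invented for illustration and are not part of the AFR code.

```
#include <errno.h>
#include <stdio.h>

#define REPLICA_COUNT 3

/* Simulated per-replica read: replicas 0 and 1 are "down" in this example. */
static int
read_one(int subvol, char *buf, size_t len)
{
    if (subvol < 2)
        return -EIO; /* pretend the brick is unreachable */
    snprintf(buf, len, "data from replica %d", subvol);
    return 0;
}

/* Walk the replicas in order, the way the read callbacks above retry. */
static int
replica_read(char *buf, size_t len)
{
    int ret = -EIO;

    for (int subvol = 0; subvol < REPLICA_COUNT; subvol++) {
        ret = read_one(subvol, buf, len);
        if (ret == 0)
            return 0; /* the first healthy replica answers the read */
    }
    return ret; /* every replica failed */
}

int
main(void)
{
    char buf[64];

    if (replica_read(buf, sizeof(buf)) == 0)
        printf("%s\n", buf);
    else
        printf("all replicas failed\n");
    return 0;
}
```

Only one successful reply ever reaches the caller, which mirrors how a single child's answer is unwound to the application while failed children merely advance the transaction to the next subvolume.
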
diff --git a/xlators/cluster/afr/src/afr-inode-read.h b/xlators/cluster/afr/src/afr-inode-read.h
index d128134ef2a..8c982bc7e6f 100644
--- a/xlators/cluster/afr/src/afr-inode-read.h
+++ b/xlators/cluster/afr/src/afr-inode-read.h
@@ -12,34 +12,34 @@
#define __INODE_READ_H__
int32_t
-afr_access (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int32_t mask, dict_t *xdata);
+afr_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask,
+ dict_t *xdata);
int32_t
-afr_stat (call_frame_t *frame, xlator_t *this,
- loc_t *loc, dict_t *xdata);
+afr_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata);
int32_t
-afr_fstat (call_frame_t *frame, xlator_t *this,
- fd_t *fd, dict_t *xdata);
+afr_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata);
int32_t
-afr_readlink (call_frame_t *frame, xlator_t *this,
- loc_t *loc, size_t size, dict_t *xdata);
+afr_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
+ dict_t *xdata);
int32_t
-afr_readv (call_frame_t *frame, xlator_t *this,
- fd_t *fd, size_t size, off_t offset, uint32_t flags, dict_t *xdata);
+afr_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata);
int32_t
-afr_getxattr (call_frame_t *frame, xlator_t *this,
- loc_t *loc, const char *name, dict_t *xdata);
+afr_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+ dict_t *xdata);
int32_t
-afr_fgetxattr (call_frame_t *frame, xlator_t *this,
- fd_t *fd, const char *name, dict_t *xdata);
-
+afr_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata);
int
-afr_handle_quota_size (call_frame_t *frame, xlator_t *this);
+afr_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ gf_seek_what_t what, dict_t *xdata);
+int
+afr_handle_quota_size(call_frame_t *frame, xlator_t *this);
#endif /* __INODE_READ_H__ */
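The header diff above reflows the existing prototypes and adds a declaration for afr_seek next to the other inode-read fops. For orientation, entry points like these are ultimately exported through the xlator's fop table; the fragment below is only a hedged sketch of what such a registration typically looks like (header names and table contents are assumed, not taken from this patch):

```
#include <glusterfs/xlator.h>

#include "afr-inode-read.h"  /* afr_seek, afr_stat, ... */
#include "afr-inode-write.h" /* assumed header for the write fops */

/* Hedged sketch: not the patch's actual fop table. */
struct xlator_fops fops = {
    .writev = afr_writev,
    .truncate = afr_truncate,
    .ftruncate = afr_ftruncate,
    .seek = afr_seek, /* newly declared in afr-inode-read.h */
};
```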
diff --git a/xlators/cluster/afr/src/afr-inode-write.c b/xlators/cluster/afr/src/afr-inode-write.c
index e0928425c6a..1d6e4f3570a 100644
--- a/xlators/cluster/afr/src/afr-inode-write.c
+++ b/xlators/cluster/afr/src/afr-inode-write.c
@@ -8,772 +8,779 @@
cases as published by the Free Software Foundation.
*/
-
-#include <libgen.h>
#include <unistd.h>
-#include <fnmatch.h>
#include <sys/time.h>
#include <stdlib.h>
#include <signal.h>
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "afr.h"
-#include "dict.h"
-#include "xlator.h"
-#include "hashfn.h"
-#include "logging.h"
-#include "stack.h"
-#include "list.h"
-#include "call-stub.h"
-#include "defaults.h"
-#include "common-utils.h"
-#include "compat-errno.h"
-#include "compat.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/compat.h>
#include "protocol-common.h"
-#include "byte-order.h"
+#include <glusterfs/byte-order.h>
#include "afr-transaction.h"
#include "afr-self-heal.h"
#include "afr-messages.h"
static void
-__afr_inode_write_finalize (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int read_subvol = 0;
- int i = 0;
- afr_read_subvol_args_t args = {0,};
- struct iatt *stbuf = NULL;
- int ret = 0;
-
- local = frame->local;
- priv = this->private;
-
- /*This code needs to stay till DHT sends fops on linked
- * inodes*/
- if (local->inode && !inode_is_linked (local->inode)) {
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret == -1)
- continue;
- if (!gf_uuid_is_null
- (local->replies[i].poststat.ia_gfid)) {
- gf_uuid_copy (args.gfid,
- local->replies[i].poststat.ia_gfid);
- args.ia_type =
- local->replies[i].poststat.ia_type;
- break;
- } else {
- ret = dict_get_bin (local->replies[i].xdata,
- DHT_IATT_IN_XDATA_KEY,
- (void **) &stbuf);
- if (ret)
- continue;
- gf_uuid_copy (args.gfid, stbuf->ia_gfid);
- args.ia_type = stbuf->ia_type;
- break;
- }
- }
+__afr_inode_write_finalize(call_frame_t *frame, xlator_t *this)
+{
+ int i = 0;
+ int ret = 0;
+ int read_subvol = 0;
+ struct iatt *stbuf = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_read_subvol_args_t args = {
+ 0,
+ };
+
+ local = frame->local;
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, local->inode, out);
+
+ /*This code needs to stay till DHT sends fops on linked
+ * inodes*/
+ if (!inode_is_linked(local->inode)) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret == -1)
+ continue;
+ if (!gf_uuid_is_null(local->replies[i].poststat.ia_gfid)) {
+ gf_uuid_copy(args.gfid, local->replies[i].poststat.ia_gfid);
+ args.ia_type = local->replies[i].poststat.ia_type;
+ break;
+ } else {
+ ret = dict_get_bin(local->replies[i].xdata,
+ DHT_IATT_IN_XDATA_KEY, (void **)&stbuf);
+ if (ret)
+ continue;
+ gf_uuid_copy(args.gfid, stbuf->ia_gfid);
+ args.ia_type = stbuf->ia_type;
+ break;
+ }
+ }
+ }
+
+ if (local->transaction.type == AFR_METADATA_TRANSACTION) {
+ read_subvol = afr_metadata_subvol_get(local->inode, this, NULL,
+ local->readable, NULL, &args);
+ } else {
+ read_subvol = afr_data_subvol_get(local->inode, this, NULL,
+ local->readable, NULL, &args);
+ }
+
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
+ afr_pick_error_xdata(local, priv, local->inode, local->readable, NULL,
+ NULL);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+ if (local->replies[i].op_ret < 0)
+ continue;
+
+ /* Order of checks in the compound conditional
+ below is important.
+
+ - Highest precedence: largest op_ret
+ - Next precedence: if all op_rets are equal, read subvol
+ - Least precedence: any succeeded subvol
+ */
+ if ((local->op_ret < local->replies[i].op_ret) ||
+ ((local->op_ret == local->replies[i].op_ret) &&
+ (i == read_subvol))) {
+ local->op_ret = local->replies[i].op_ret;
+ local->op_errno = local->replies[i].op_errno;
+
+ local->cont.inode_wfop.prebuf = local->replies[i].prestat;
+ local->cont.inode_wfop.postbuf = local->replies[i].poststat;
+
+ if (local->replies[i].xdata) {
+ if (local->xdata_rsp)
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ }
+ if (local->replies[i].xattr) {
+ if (local->xattr_rsp)
+ dict_unref(local->xattr_rsp);
+ local->xattr_rsp = dict_ref(local->replies[i].xattr);
+ }
}
+ }
- if (local->inode) {
- if (local->transaction.type == AFR_METADATA_TRANSACTION)
- read_subvol = afr_metadata_subvol_get (local->inode, this,
- NULL, NULL,
- &args);
- else
- read_subvol = afr_data_subvol_get (local->inode, this,
- NULL, NULL, &args);
- }
-
- local->op_ret = -1;
- local->op_errno = afr_final_errno (local, priv);
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret < 0) {
- afr_inode_read_subvol_reset (local->inode, this);
- continue;
- }
-
- /* Order of checks in the compound conditional
- below is important.
-
- - Highest precedence: largest op_ret
- - Next precendence: if all op_rets are equal, read subvol
- - Least precedence: any succeeded subvol
- */
- if ((local->op_ret < local->replies[i].op_ret) ||
- ((local->op_ret == local->replies[i].op_ret) &&
- (i == read_subvol))) {
-
- local->op_ret = local->replies[i].op_ret;
- local->op_errno = local->replies[i].op_errno;
-
- local->cont.inode_wfop.prebuf =
- local->replies[i].prestat;
- local->cont.inode_wfop.postbuf =
- local->replies[i].poststat;
-
- if (local->replies[i].xdata) {
- if (local->xdata_rsp)
- dict_unref (local->xdata_rsp);
- local->xdata_rsp =
- dict_ref (local->replies[i].xdata);
- }
- if (local->replies[i].xattr) {
- if (local->xattr_rsp)
- dict_unref (local->xattr_rsp);
- local->xattr_rsp =
- dict_ref (local->replies[i].xattr);
- }
- }
- }
-
- afr_txn_arbitrate_fop_cbk (frame, this);
+ afr_set_in_flight_sb_status(this, frame, local->inode);
+out:
+ return;
}
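The rewritten __afr_inode_write_finalize keeps the precedence rule spelled out in its comment: the largest op_ret wins, ties are broken in favour of the read subvolume, and any successful brick beats the initial -1. A self-contained sketch of just that selection, with invented names (replies are assumed valid):

```
/* Hedged sketch of the reply-folding precedence used above; the arrays
 * stand in for local->replies[] and read_subvol for the chosen read child. */
static void
pick_final_reply(int n, const int *op_ret, const int *op_errno,
                 int read_subvol, int *final_ret, int *final_errno)
{
    int i;

    *final_ret = -1;
    for (i = 0; i < n; i++) {
        if (op_ret[i] < 0)
            continue; /* failed bricks never win */
        if (*final_ret < op_ret[i] ||
            (*final_ret == op_ret[i] && i == read_subvol)) {
            *final_ret = op_ret[i];
            *final_errno = op_errno[i];
        }
    }
}
```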
-
static void
-__afr_inode_write_fill (call_frame_t *frame, xlator_t *this, int child_index,
- int op_ret, int op_errno,
- struct iatt *prebuf, struct iatt *postbuf,
- dict_t *xattr, dict_t *xdata)
+__afr_inode_write_fill(call_frame_t *frame, xlator_t *this, int child_index,
+ int op_ret, int op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xattr, dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- local->replies[child_index].valid = 1;
+ local->replies[child_index].valid = 1;
- if (AFR_IS_ARBITER_BRICK(priv, child_index) && op_ret == 1)
- op_ret = iov_length (local->cont.writev.vector,
- local->cont.writev.count);
+ if (AFR_IS_ARBITER_BRICK(priv, child_index) && op_ret == 1)
+ op_ret = iov_length(local->cont.writev.vector,
+ local->cont.writev.count);
- local->replies[child_index].op_ret = op_ret;
- local->replies[child_index].op_errno = op_errno;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+ if (xdata)
+ local->replies[child_index].xdata = dict_ref(xdata);
- if (op_ret >= 0) {
- if (prebuf)
- local->replies[child_index].prestat = *prebuf;
- if (postbuf)
- local->replies[child_index].poststat = *postbuf;
- if (xattr)
- local->replies[child_index].xattr = dict_ref (xattr);
- if (xdata)
- local->replies[child_index].xdata = dict_ref (xdata);
- } else {
- afr_transaction_fop_failed (frame, this, child_index);
- }
+ if (op_ret >= 0) {
+ if (prebuf)
+ local->replies[child_index].prestat = *prebuf;
+ if (postbuf)
+ local->replies[child_index].poststat = *postbuf;
+ if (xattr)
+ local->replies[child_index].xattr = dict_ref(xattr);
+ } else {
+ afr_transaction_fop_failed(frame, this, child_index);
+ }
- return;
+ return;
}
-
static int
-__afr_inode_write_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xattr, dict_t *xdata)
-{
- afr_local_t *local = NULL;
- int child_index = (long) cookie;
- int call_count = -1;
-
- local = frame->local;
-
- LOCK (&frame->lock);
- {
- __afr_inode_write_fill (frame, this, child_index, op_ret,
- op_errno, prebuf, postbuf, xattr,
- xdata);
+__afr_inode_write_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xattr, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int child_index = (long)cookie;
+ int call_count = -1;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ LOCK(&frame->lock);
+ {
+ __afr_inode_write_fill(frame, this, child_index, op_ret, op_errno,
+ prebuf, postbuf, xattr, xdata);
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
+
+ if (call_count == 0) {
+ __afr_inode_write_finalize(frame, this);
+
+ if (afr_txn_nothing_failed(frame, this)) {
+ /*if it did pre-op, it will do post-op changing ctime*/
+ if (priv->consistent_metadata && afr_needs_changelog_update(local))
+ afr_zero_fill_stat(local);
+ local->transaction.unwind(frame, this);
}
- UNLOCK (&frame->lock);
-
- call_count = afr_frame_return (frame);
-
- if (call_count == 0) {
- __afr_inode_write_finalize (frame, this);
- if (afr_txn_nothing_failed (frame, this))
- local->transaction.unwind (frame, this);
+ afr_transaction_resume(frame, this);
+ }
- local->transaction.resume (frame, this);
- }
-
- return 0;
+ return 0;
}
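__afr_inode_write_cbk above records each child's reply under the frame lock and lets only the last outstanding reply (call_count reaching zero) run the finalize/unwind/resume path. A generic sketch of that "last reply finalizes" pattern, using a plain pthread mutex in place of the frame lock:

```
#include <pthread.h>
#include <stdbool.h>

/* Each per-child callback decrements the outstanding count under the
 * lock; only the caller that observes zero goes on to finalize. */
static bool
note_reply_and_check_last(pthread_mutex_t *lock, int *call_count)
{
    int remaining;

    pthread_mutex_lock(lock);
    remaining = --(*call_count);
    pthread_mutex_unlock(lock);

    return remaining == 0;
}
```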
/* {{{ writev */
void
-afr_writev_copy_outvars (call_frame_t *src_frame, call_frame_t *dst_frame)
+afr_writev_copy_outvars(call_frame_t *src_frame, call_frame_t *dst_frame)
{
- afr_local_t *src_local = NULL;
- afr_local_t *dst_local = NULL;
+ afr_local_t *src_local = NULL;
+ afr_local_t *dst_local = NULL;
- src_local = src_frame->local;
- dst_local = dst_frame->local;
+ src_local = src_frame->local;
+ dst_local = dst_frame->local;
- dst_local->op_ret = src_local->op_ret;
- dst_local->op_errno = src_local->op_errno;
- dst_local->cont.inode_wfop.prebuf = src_local->cont.inode_wfop.prebuf;
- dst_local->cont.inode_wfop.postbuf = src_local->cont.inode_wfop.postbuf;
- if (src_local->xdata_rsp)
- dst_local->xdata_rsp = dict_ref (src_local->xdata_rsp);
+ dst_local->op_ret = src_local->op_ret;
+ dst_local->op_errno = src_local->op_errno;
+ dst_local->cont.inode_wfop.prebuf = src_local->cont.inode_wfop.prebuf;
+ dst_local->cont.inode_wfop.postbuf = src_local->cont.inode_wfop.postbuf;
+ if (src_local->xdata_rsp)
+ dst_local->xdata_rsp = dict_ref(src_local->xdata_rsp);
}
void
-afr_writev_unwind (call_frame_t *frame, xlator_t *this)
+afr_writev_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- local = frame->local;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = this->private;
- AFR_STACK_UNWIND (writev, frame,
- local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf,
- local->xdata_rsp);
-}
+ local = frame->local;
+
+ if (priv->consistent_metadata)
+ afr_zero_fill_stat(local);
+ AFR_STACK_UNWIND(writev, frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+}
int
-afr_transaction_writev_unwind (call_frame_t *frame, xlator_t *this)
+afr_transaction_writev_unwind(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *fop_frame = NULL;
+ call_frame_t *fop_frame = NULL;
- fop_frame = afr_transaction_detach_fop_frame (frame);
+ fop_frame = afr_transaction_detach_fop_frame(frame);
- if (fop_frame) {
- afr_writev_copy_outvars (frame, fop_frame);
- afr_writev_unwind (fop_frame, this);
- }
- return 0;
+ if (fop_frame) {
+ afr_writev_copy_outvars(frame, fop_frame);
+ afr_writev_unwind(fop_frame, this);
+ }
+ return 0;
}
static void
-afr_writev_handle_short_writes (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
-
- local = frame->local;
- priv = this->private;
- /*
- * We already have the best case result of the writev calls staged
- * as the return value. Any writev that returns some value less
- * than the best case is now out of sync, so mark the fop as
- * failed. Note that fops that have returned with errors have
- * already been marked as failed.
- */
- for (i = 0; i < priv->child_count; i++) {
- if ((!local->replies[i].valid) ||
- (local->replies[i].op_ret == -1))
- continue;
-
- if (local->replies[i].op_ret < local->op_ret)
- afr_transaction_fop_failed (frame, this, i);
- }
+afr_writev_handle_short_writes(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+
+ local = frame->local;
+ priv = this->private;
+ /*
+ * We already have the best case result of the writev calls staged
+ * as the return value. Any writev that returns some value less
+ * than the best case is now out of sync, so mark the fop as
+ * failed. Note that fops that have returned with errors have
+ * already been marked as failed.
+ */
+ for (i = 0; i < priv->child_count; i++) {
+ if ((!local->replies[i].valid) || (local->replies[i].op_ret == -1))
+ continue;
+
+ if (local->replies[i].op_ret < local->op_ret)
+ afr_transaction_fop_failed(frame, this, i);
+ }
}
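afr_writev_handle_short_writes relies on the best-case op_ret already being staged as the fop result: any brick that succeeded but wrote fewer bytes is now out of sync and has to be treated as failed for the transaction. A simplified, self-contained version of that check (validity tracking omitted):

```
#include <stdbool.h>

/* Hedged sketch: mark bricks whose successful write was shorter than the
 * best reply as failed, so the transaction records them as out of sync. */
static void
mark_short_writes(int n, const int *op_ret, int best_ret, bool *failed)
{
    int i;

    for (i = 0; i < n; i++) {
        if (op_ret[i] == -1)
            continue; /* errors were already marked failed elsewhere */
        if (op_ret[i] < best_ret)
            failed[i] = true; /* short write */
    }
}
```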
-int
-afr_writev_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+void
+afr_inode_write_fill(call_frame_t *frame, xlator_t *this, int child_index,
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
struct iatt *postbuf, dict_t *xdata)
{
- afr_local_t * local = NULL;
- call_frame_t *fop_frame = NULL;
- int child_index = (long) cookie;
- int call_count = -1;
- int ret = 0;
- uint32_t open_fd_count = 0;
- uint32_t write_is_append = 0;
-
- local = frame->local;
-
- LOCK (&frame->lock);
- {
- __afr_inode_write_fill (frame, this, child_index, op_ret,
- op_errno, prebuf, postbuf, NULL, xdata);
- if (op_ret == -1 || !xdata)
- goto unlock;
-
- write_is_append = 0;
- ret = dict_get_uint32 (xdata, GLUSTERFS_WRITE_IS_APPEND,
- &write_is_append);
- if (ret || !write_is_append)
- local->append_write = _gf_false;
-
- ret = dict_get_uint32 (xdata, GLUSTERFS_OPEN_FD_COUNT,
- &open_fd_count);
- if (ret == -1)
- goto unlock;
- if ((open_fd_count > local->open_fd_count)) {
- local->open_fd_count = open_fd_count;
- local->update_open_fd_count = _gf_true;
- }
+ int ret = 0;
+ afr_local_t *local = frame->local;
+ uint32_t open_fd_count = 0;
+ uint32_t write_is_append = 0;
+ int32_t num_inodelks = 0;
+
+ LOCK(&frame->lock);
+ {
+ __afr_inode_write_fill(frame, this, child_index, op_ret, op_errno,
+ prebuf, postbuf, NULL, xdata);
+ if (op_ret == -1 || !xdata)
+ goto unlock;
+
+ write_is_append = 0;
+ ret = dict_get_uint32(xdata, GLUSTERFS_WRITE_IS_APPEND,
+ &write_is_append);
+ if (ret || !write_is_append)
+ local->append_write = _gf_false;
+
+ ret = dict_get_uint32(xdata, GLUSTERFS_ACTIVE_FD_COUNT, &open_fd_count);
+ if (ret < 0)
+ goto unlock;
+ if (open_fd_count > local->open_fd_count) {
+ local->open_fd_count = open_fd_count;
+ local->update_open_fd_count = _gf_true;
}
-unlock:
- UNLOCK (&frame->lock);
-
- call_count = afr_frame_return (frame);
-
- if (call_count == 0) {
- if (!local->stable_write && !local->append_write)
- /* An appended write removes the necessity to
- fsync() the file. This is because self-heal
- has the logic to check for larger file when
- the xattrs are not reliably pointing at
- a stale file.
- */
- afr_fd_report_unstable_write (this, local->fd);
-
- __afr_inode_write_finalize (frame, this);
-
- afr_writev_handle_short_writes (frame, this);
-
- if (local->update_open_fd_count)
- afr_handle_open_fd_count (frame, this);
-
- if (!afr_txn_nothing_failed (frame, this)) {
- //Don't unwind until post-op is complete
- local->transaction.resume (frame, this);
- } else {
- /*
- * Generally inode-write fops do transaction.unwind then
- * transaction.resume, but writev needs to make sure that
- * delayed post-op frame is placed in fdctx before unwind
- * happens. This prevents the race of flush doing the
- * changelog wakeup first in fuse thread and then this
- * writev placing its delayed post-op frame in fdctx.
- * This helps flush make sure all the delayed post-ops are
- * completed.
- */
-
- fop_frame = afr_transaction_detach_fop_frame (frame);
- afr_writev_copy_outvars (frame, fop_frame);
- local->transaction.resume (frame, this);
- afr_writev_unwind (fop_frame, this);
- }
+
+ ret = dict_get_int32_sizen(xdata, GLUSTERFS_INODELK_COUNT,
+ &num_inodelks);
+ if (ret < 0)
+ goto unlock;
+ if (num_inodelks > local->num_inodelks) {
+ local->num_inodelks = num_inodelks;
+ local->update_num_inodelks = _gf_true;
}
- return 0;
+ }
+unlock:
+ UNLOCK(&frame->lock);
}
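afr_inode_write_fill folds per-brick hints from xdata by keeping the maximum value observed (open fd count, inodelk count) and noting that the cached value needs refreshing. The same bookkeeping in isolation, as a hedged sketch:

```
#include <stdbool.h>
#include <stdint.h>

/* Keep the largest counter reported by any brick and remember that the
 * cached value must be refreshed, mirroring the open-fd-count and
 * inodelk-count handling above. */
static void
track_max_counter(uint32_t reported, uint32_t *current, bool *needs_update)
{
    if (reported > *current) {
        *current = reported;
        *needs_update = true;
    }
}
```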
-static int
-afr_arbiter_writev_wind (call_frame_t *frame, xlator_t *this, int subvol)
+void
+afr_process_post_writev(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = frame->local;
- afr_private_t *priv = this->private;
- static char byte = 0xFF;
- static struct iovec vector = {&byte, 1};
- int32_t count = 1;
+ afr_local_t *local = NULL;
+ afr_lock_t *lock = NULL;
- STACK_WIND_COOKIE (frame, afr_writev_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->writev,
- local->fd, &vector, count, local->cont.writev.offset,
- local->cont.writev.flags, local->cont.writev.iobref,
- local->xdata_req);
+ local = frame->local;
- return 0;
+ if (!local->stable_write && !local->append_write)
+ /* An appended write removes the necessity to
+ fsync() the file. This is because self-heal
+ has the logic to check for the larger file when
+ the xattrs do not reliably point at
+ a stale file.
+ */
+ afr_fd_report_unstable_write(this, local);
+
+ __afr_inode_write_finalize(frame, this);
+
+ afr_writev_handle_short_writes(frame, this);
+
+ if (local->update_open_fd_count)
+ local->inode_ctx->open_fd_count = local->open_fd_count;
+ if (local->update_num_inodelks &&
+ local->transaction.type == AFR_DATA_TRANSACTION) {
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ lock->num_inodelks = local->num_inodelks;
+ }
}
int
-afr_writev_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_writev_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ call_frame_t *fop_frame = NULL;
+ int child_index = (long)cookie;
+ int call_count = -1;
+
+ afr_inode_write_fill(frame, this, child_index, op_ret, op_errno, prebuf,
+ postbuf, xdata);
- local = frame->local;
- priv = this->private;
+ call_count = afr_frame_return(frame);
- if (AFR_IS_ARBITER_BRICK(priv, subvol)) {
- afr_arbiter_writev_wind (frame, this, subvol);
- return 0;
+ if (call_count == 0) {
+ afr_process_post_writev(frame, this);
+
+ if (!afr_txn_nothing_failed(frame, this)) {
+ // Don't unwind until post-op is complete
+ afr_transaction_resume(frame, this);
+ } else {
+ /*
+ * Generally inode-write fops do transaction.unwind then
+ * transaction.resume, but writev needs to make sure that
+ * delayed post-op frame is placed in fdctx before unwind
+ * happens. This prevents the race of flush doing the
+ * changelog wakeup first in fuse thread and then this
+ * writev placing its delayed post-op frame in fdctx.
+ * This helps flush make sure all the delayed post-ops are
+ * completed.
+ */
+
+ fop_frame = afr_transaction_detach_fop_frame(frame);
+ afr_writev_copy_outvars(frame, fop_frame);
+ afr_transaction_resume(frame, this);
+ afr_writev_unwind(fop_frame, this);
}
+ }
+ return 0;
+}
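The comment in afr_writev_wind_cbk describes an ordering constraint for the all-succeeded path: the application frame is detached and the results copied first, the transaction resumes next (parking the delayed post-op in the fd context), and the unwind to the application comes last, so a later flush always finds the delayed post-op already registered. A sketch of that ordering with invented types:

```
/* Hedged sketch only; the callbacks stand in for AFR's copy-outvars,
 * transaction-resume and stack-unwind steps. */
struct writev_finish_ops {
    void (*copy_results)(void *txn_frame, void *app_frame);
    void (*resume_transaction)(void *txn_frame);
    void (*unwind_to_application)(void *app_frame);
};

static void
finish_all_good_writev(const struct writev_finish_ops *ops, void *txn_frame,
                       void *app_frame)
{
    ops->copy_results(txn_frame, app_frame);
    ops->resume_transaction(txn_frame);    /* delayed post-op parked first */
    ops->unwind_to_application(app_frame); /* then the app sees the reply */
}
```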
- STACK_WIND_COOKIE (frame, afr_writev_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->writev,
- local->fd, local->cont.writev.vector,
- local->cont.writev.count, local->cont.writev.offset,
- local->cont.writev.flags, local->cont.writev.iobref,
- local->xdata_req);
- return 0;
+static int
+afr_arbiter_writev_wind(call_frame_t *frame, xlator_t *this, int subvol)
+{
+ afr_local_t *local = frame->local;
+ afr_private_t *priv = this->private;
+ static char byte = 0xFF;
+ static struct iovec vector = {&byte, 1};
+ int32_t count = 1;
+
+ STACK_WIND_COOKIE(
+ frame, afr_writev_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol], priv->children[subvol]->fops->writev, local->fd,
+ &vector, count, local->cont.writev.offset, local->cont.writev.flags,
+ local->cont.writev.iobref, local->xdata_req);
+
+ return 0;
}
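afr_arbiter_writev_wind above never sends the real payload to the arbiter brick: the vector is replaced by a single static 0xFF byte, so the arbiter still participates in locking and changelog accounting without storing data. The substitution in isolation:

```
#include <sys/uio.h>

/* Hedged illustration of the arbiter dummy payload: one 0xFF byte in a
 * one-element iovec stands in for the application's data. */
static struct iovec
arbiter_dummy_payload(void)
{
    static char byte = 0xFF;
    struct iovec v = {.iov_base = &byte, .iov_len = 1};

    return v;
}
```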
+int
+afr_writev_wind(call_frame_t *frame, xlator_t *this, int subvol)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (AFR_IS_ARBITER_BRICK(priv, subvol)) {
+ afr_arbiter_writev_wind(frame, this, subvol);
+ return 0;
+ }
+
+ STACK_WIND_COOKIE(frame, afr_writev_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->writev, local->fd,
+ local->cont.writev.vector, local->cont.writev.count,
+ local->cont.writev.offset, local->cont.writev.flags,
+ local->cont.writev.iobref, local->xdata_req);
+ return 0;
+}
int
-afr_do_writev (call_frame_t *frame, xlator_t *this)
+afr_do_writev(call_frame_t *frame, xlator_t *this)
{
- call_frame_t *transaction_frame = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ call_frame_t *transaction_frame = NULL;
+ afr_local_t *local = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = frame->local;
- priv = this->private;
- transaction_frame->local = local;
- frame->local = NULL;
-
- if (!AFR_FRAME_INIT (frame, op_errno))
- goto out;
-
- local->op = GF_FOP_WRITE;
-
- local->transaction.wind = afr_writev_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_transaction_writev_unwind;
-
- local->transaction.main_frame = frame;
-
- if (local->fd->flags & O_APPEND) {
- /*
- * Backend vfs ignores the 'offset' for append mode fd so
- * locking just the region provided for the writev does not
- * give consistency guarantee. The actual write may happen at a
- * completely different range than the one provided by the
- * offset, len in the fop. So lock the entire file.
- */
- local->transaction.start = 0;
- local->transaction.len = 0;
- } else {
- local->transaction.start = local->cont.writev.offset;
- local->transaction.len = iov_length (local->cont.writev.vector,
- local->cont.writev.count);
-
- /*Lock entire file to avoid network split brains.*/
- if (priv->arbiter_count == 1) {
- local->transaction.start = 0;
- local->transaction.len = 0;
- }
- }
+ local = frame->local;
+ transaction_frame->local = local;
+ frame->local = NULL;
- ret = afr_transaction (transaction_frame, this, AFR_DATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ if (!AFR_FRAME_INIT(frame, op_errno))
+ goto out;
+
+ local->op = GF_FOP_WRITE;
+
+ local->transaction.wind = afr_writev_wind;
+ local->transaction.unwind = afr_transaction_writev_unwind;
+
+ local->transaction.main_frame = frame;
- return 0;
+ if (local->fd->flags & O_APPEND) {
+ /*
+ * Backend vfs ignores the 'offset' for append mode fd so
+ * locking just the region provided for the writev does not
+ * give consistency guarantee. The actual write may happen at a
+ * completely different range than the one provided by the
+ * offset, len in the fop. So lock the entire file.
+ */
+ local->transaction.start = 0;
+ local->transaction.len = 0;
+ } else {
+ local->transaction.start = local->cont.writev.offset;
+ local->transaction.len = iov_length(local->cont.writev.vector,
+ local->cont.writev.count);
+ }
+
+ ret = afr_transaction(transaction_frame, this, AFR_DATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (writev, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(writev, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
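afr_do_writev chooses the transaction's lock range from the fd flags: for an O_APPEND fd the backend ignores the supplied offset, so the whole file is locked (start and len both 0), while a regular write locks only the byte range it touches. The decision as a stand-alone sketch:

```
#include <fcntl.h>
#include <stddef.h>
#include <sys/types.h>

/* Hedged sketch of the lock-range selection; len == 0 means "to the end
 * of the file" in this scheme. */
static void
writev_lock_range(int fd_flags, off_t offset, size_t nbytes, off_t *start,
                  size_t *len)
{
    if (fd_flags & O_APPEND) {
        *start = 0;
        *len = 0;
    } else {
        *start = offset;
        *len = nbytes;
    }
}
```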
-
int
-afr_writev (call_frame_t *frame, xlator_t *this, fd_t *fd,
- struct iovec *vector, int32_t count, off_t offset,
- uint32_t flags, struct iobref *iobref, dict_t *xdata)
+afr_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *vector,
+ int32_t count, off_t offset, uint32_t flags, struct iobref *iobref,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ int op_errno = ENOMEM;
+ int ret = -1;
+
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local->cont.writev.vector = iov_dup(vector, count);
+ if (!local->cont.writev.vector)
+ goto out;
+ local->cont.writev.count = count;
+ local->cont.writev.offset = offset;
+ local->cont.writev.flags = flags;
+ local->cont.writev.iobref = iobref_ref(iobref);
- local->cont.writev.vector = iov_dup (vector, count);
- if (!local->cont.writev.vector)
- goto out;
- local->cont.writev.count = count;
- local->cont.writev.offset = offset;
- local->cont.writev.flags = flags;
- local->cont.writev.iobref = iobref_ref (iobref);
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ if (!local->xdata_req)
+ goto out;
- if (!local->xdata_req)
- goto out;
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ if (dict_set_uint32(local->xdata_req, GLUSTERFS_ACTIVE_FD_COUNT, 4)) {
+ op_errno = ENOMEM;
+ goto out;
+ }
- if (dict_set_uint32 (local->xdata_req, GLUSTERFS_OPEN_FD_COUNT, 4)) {
- op_errno = ENOMEM;
- goto out;
- }
+ if (dict_set_str_sizen(local->xdata_req, GLUSTERFS_INODELK_DOM_COUNT,
+ this->name)) {
+ op_errno = ENOMEM;
+ goto out;
+ }
- if (dict_set_uint32 (local->xdata_req, GLUSTERFS_WRITE_IS_APPEND, 4)) {
- op_errno = ENOMEM;
- goto out;
- }
+ if (dict_set_uint32(local->xdata_req, GLUSTERFS_WRITE_IS_APPEND, 4)) {
+ op_errno = ENOMEM;
+ goto out;
+ }
- /* Set append_write to be true speculatively. If on any
- server it turns not be true, we unset it in the
- callback.
- */
- local->append_write = _gf_true;
+ /* Set append_write to be true speculatively. If on any
+ server it turns out not to be true, we unset it in the
+ callback.
+ */
+ local->append_write = _gf_true;
- /* detect here, but set it in writev_wind_cbk *after* the unstable
- write is performed
- */
- local->stable_write = !!((fd->flags|flags)&(O_SYNC|O_DSYNC));
+ /* detect here, but set it in writev_wind_cbk *after* the unstable
+ write is performed
+ */
+ local->stable_write = !!((fd->flags | flags) & (O_SYNC | O_DSYNC));
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- afr_do_writev (frame, this);
+ afr_do_writev(frame, this);
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (writev, frame, -1, op_errno, NULL, NULL, NULL);
+ AFR_STACK_UNWIND(writev, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ return 0;
}
-
/* }}} */
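afr_writev initializes two speculative flags: append_write starts out true and is cleared in the callback if any brick reports the write was not an append, and stable_write is derived from the sync flags on the fd or the call. The stable-write test in isolation:

```
#include <fcntl.h>
#include <stdbool.h>

/* Hedged sketch: a write is "stable" (no unstable-write bookkeeping or
 * later fsync needed) when either the fd or the writev call carries
 * O_SYNC/O_DSYNC, matching the initialization of local->stable_write. */
static bool
write_is_stable(int fd_flags, int write_flags)
{
    return ((fd_flags | write_flags) & (O_SYNC | O_DSYNC)) != 0;
}
```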
/* {{{ truncate */
int
-afr_truncate_unwind (call_frame_t *frame, xlator_t *this)
+afr_truncate_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
-
- local = frame->local;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (truncate, main_frame, local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(truncate, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ return 0;
+}
int
-afr_truncate_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
+afr_truncate_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret == 0 && prebuf->ia_size != postbuf->ia_size)
- local->stable_write = _gf_false;
+ if (op_ret == 0 && prebuf->ia_size != postbuf->ia_size)
+ local->stable_write = _gf_false;
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- prebuf, postbuf, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, prebuf,
+ postbuf, NULL, xdata);
}
-
int
-afr_truncate_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_truncate_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_truncate_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->truncate,
- &local->loc, local->cont.truncate.offset,
- local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_truncate_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->truncate, &local->loc,
+ local->cont.truncate.offset, local->xdata_req);
+ return 0;
}
-
int
-afr_truncate (call_frame_t *frame, xlator_t *this,
- loc_t *loc, off_t offset, dict_t *xdata)
+afr_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
{
- afr_local_t * local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.truncate.offset = offset;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ local->cont.truncate.offset = offset;
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->transaction.wind = afr_truncate_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_truncate_unwind;
+ local->transaction.wind = afr_truncate_wind;
+ local->transaction.unwind = afr_truncate_unwind;
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
+ loc_copy(&local->loc, loc);
+ ret = afr_set_inode_local(this, local, loc->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_TRUNCATE;
+ local->op = GF_FOP_TRUNCATE;
- local->transaction.main_frame = frame;
- local->transaction.start = offset;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = offset;
+ local->transaction.len = 0;
- /* Set it true speculatively, will get reset in afr_truncate_wind_cbk
- if truncate was not a NOP */
- local->stable_write = _gf_true;
+ /* Set it true speculatively, will get reset in afr_truncate_wind_cbk
+ if truncate was not a NOP */
+ local->stable_write = _gf_true;
- ret = afr_transaction (transaction_frame, this, AFR_DATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_DATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (truncate, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(truncate, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
-
/* }}} */
/* {{{ ftruncate */
-
int
-afr_ftruncate_unwind (call_frame_t *frame, xlator_t *this)
+afr_ftruncate_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (ftruncate, main_frame, local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(ftruncate, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ return 0;
+}
int
-afr_ftruncate_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
+afr_ftruncate_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (op_ret == 0 && prebuf->ia_size != postbuf->ia_size)
- local->stable_write = _gf_false;
+ if (op_ret == 0 && prebuf->ia_size != postbuf->ia_size)
+ local->stable_write = _gf_false;
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- prebuf, postbuf, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, prebuf,
+ postbuf, NULL, xdata);
}
-
int
-afr_ftruncate_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_ftruncate_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_ftruncate_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->ftruncate,
- local->fd, local->cont.ftruncate.offset,
- local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_ftruncate_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->ftruncate, local->fd,
+ local->cont.ftruncate.offset, local->xdata_req);
+ return 0;
}
-
int
-afr_ftruncate (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
- dict_t *xdata)
+afr_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.ftruncate.offset = offset;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ local->cont.ftruncate.offset = offset;
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_FTRUNCATE;
+ local->op = GF_FOP_FTRUNCATE;
- local->transaction.wind = afr_ftruncate_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_ftruncate_unwind;
+ local->transaction.wind = afr_ftruncate_wind;
+ local->transaction.unwind = afr_ftruncate_unwind;
- local->transaction.main_frame = frame;
+ local->transaction.main_frame = frame;
- local->transaction.start = local->cont.ftruncate.offset;
- local->transaction.len = 0;
+ local->transaction.start = local->cont.ftruncate.offset;
+ local->transaction.len = 0;
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- /* Set it true speculatively, will get reset in afr_ftruncate_wind_cbk
- if truncate was not a NOP */
- local->stable_write = _gf_true;
+ /* Set it true speculatively, will get reset in afr_ftruncate_wind_cbk
+ if truncate was not a NOP */
+ local->stable_write = _gf_true;
- ret = afr_transaction (transaction_frame, this, AFR_DATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_DATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- AFR_STACK_UNWIND (ftruncate, frame, -1, op_errno, NULL, NULL, NULL);
+ AFR_STACK_UNWIND(ftruncate, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ return 0;
}
/* }}} */
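The truncate and ftruncate callbacks set stable_write speculatively and clear it only when the truncate actually changed the file size, i.e. when the pre-op and post-op sizes differ. That test, as a small hedged sketch:

```
#include <stdbool.h>
#include <stdint.h>

/* True when a successful truncate really changed the size, which is the
 * condition used above to drop the speculative stable_write flag. */
static bool
truncate_changed_size(int op_ret, uint64_t pre_size, uint64_t post_size)
{
    return op_ret == 0 && pre_size != post_size;
}
```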
@@ -781,1701 +788,1778 @@ out:
/* {{{ setattr */
int
-afr_setattr_unwind (call_frame_t *frame, xlator_t *this)
+afr_setattr_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- call_frame_t *main_frame = NULL;
-
- local = frame->local;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (setattr, main_frame, local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf,
- local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(setattr, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ return 0;
+}
int
-afr_setattr_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno,
- struct iatt *preop, struct iatt *postop, dict_t *xdata)
+afr_setattr_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *preop,
+ struct iatt *postop, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- preop, postop, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, preop,
+ postop, NULL, xdata);
}
-
int
-afr_setattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_setattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_setattr_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->setattr,
- &local->loc, &local->cont.setattr.in_buf,
- local->cont.setattr.valid, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_setattr_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->setattr, &local->loc,
+ &local->cont.setattr.in_buf, local->cont.setattr.valid,
+ local->xdata_req);
+ return 0;
}
-
int
-afr_setattr (call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *buf,
- int32_t valid, dict_t *xdata)
+afr_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *buf,
+ int32_t valid, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.setattr.in_buf = *buf;
- local->cont.setattr.valid = valid;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ local->cont.setattr.in_buf = *buf;
+ local->cont.setattr.valid = valid;
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->transaction.wind = afr_setattr_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_setattr_unwind;
+ local->transaction.wind = afr_setattr_wind;
+ local->transaction.unwind = afr_setattr_unwind;
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
+ loc_copy(&local->loc, loc);
+ ret = afr_set_inode_local(this, local, loc->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_SETATTR;
+ local->op = GF_FOP_SETATTR;
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- ret = afr_transaction (transaction_frame, this, AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (setattr, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(setattr, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
/* {{{ fsetattr */
int
-afr_fsetattr_unwind (call_frame_t *frame, xlator_t *this)
+afr_fsetattr_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
-
- local = frame->local;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (fsetattr, main_frame, local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(fsetattr, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ return 0;
+}
int
-afr_fsetattr_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno,
- struct iatt *preop, struct iatt *postop, dict_t *xdata)
+afr_fsetattr_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preop,
+ struct iatt *postop, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- preop, postop, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, preop,
+ postop, NULL, xdata);
}
-
int
-afr_fsetattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_fsetattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_fsetattr_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->fsetattr,
- local->fd, &local->cont.fsetattr.in_buf,
- local->cont.fsetattr.valid, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_fsetattr_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->fsetattr, local->fd,
+ &local->cont.fsetattr.in_buf, local->cont.fsetattr.valid,
+ local->xdata_req);
+ return 0;
}
-
int
-afr_fsetattr (call_frame_t *frame, xlator_t *this,
- fd_t *fd, struct iatt *buf, int32_t valid, dict_t *xdata)
+afr_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iatt *buf,
+ int32_t valid, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.fsetattr.in_buf = *buf;
- local->cont.fsetattr.valid = valid;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ local->cont.fsetattr.in_buf = *buf;
+ local->cont.fsetattr.valid = valid;
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->transaction.wind = afr_fsetattr_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_fsetattr_unwind;
+ local->transaction.wind = afr_fsetattr_wind;
+ local->transaction.unwind = afr_fsetattr_unwind;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_FSETATTR;
+ local->op = GF_FOP_FSETATTR;
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- ret = afr_transaction (transaction_frame, this, AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (fsetattr, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(fsetattr, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
-
/* {{{ setxattr */
-
int
-afr_setxattr_unwind (call_frame_t *frame, xlator_t *this)
+afr_setxattr_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
-
- local = frame->local;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (setxattr, main_frame, local->op_ret, local->op_errno,
- local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(setxattr, main_frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return 0;
+}
int
-afr_setxattr_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_setxattr_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- NULL, NULL, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ NULL, NULL, xdata);
}
-
int
-afr_setxattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_setxattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_setxattr_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->setxattr,
- &local->loc, local->cont.setxattr.dict,
- local->cont.setxattr.flags, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_setxattr_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->setxattr, &local->loc,
+ local->cont.setxattr.dict, local->cont.setxattr.flags,
+ local->xdata_req);
+ return 0;
}
int
-afr_emptyb_set_pending_changelog_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int op_ret, int op_errno,
- dict_t *xattr, dict_t *xdata)
+afr_emptyb_set_pending_changelog_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int op_ret, int op_errno,
+ dict_t *xattr, dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i, ret = 0;
- char *op_type = NULL;
-
- local = frame->local;
- priv = this->private;
- i = (long) cookie;
-
- local->replies[i].valid = 1;
- local->replies[i].op_ret = op_ret;
- local->replies[i].op_errno = op_errno;
-
- ret = dict_get_str (local->xdata_req, "replicate-brick-op", &op_type);
- if (ret)
- goto out;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i, ret = 0;
+ char *op_type = NULL;
- gf_msg (this->name, op_ret ? GF_LOG_ERROR : GF_LOG_INFO,
- op_ret ? op_errno : 0,
- afr_get_msg_id (op_type),
- "Set of pending xattr %s on"
- " %s.", op_ret ? "failed" : "succeeded",
- priv->children[i]->name);
+ local = frame->local;
+ priv = this->private;
+ i = (long)cookie;
-out:
- syncbarrier_wake (&local->barrier);
- return 0;
-}
-
-int
-afr_emptyb_set_pending_changelog (call_frame_t *frame, xlator_t *this,
- unsigned char *locked_nodes)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int ret = 0, i = 0;
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
- local = frame->local;
- priv = this->private;
+ ret = dict_get_str_sizen(local->xdata_req, "replicate-brick-op", &op_type);
+ if (ret)
+ goto out;
- AFR_ONLIST (locked_nodes, frame, afr_emptyb_set_pending_changelog_cbk,
- xattrop, &local->loc, GF_XATTROP_ADD_ARRAY,
- local->xattr_req, NULL);
+ gf_smsg(this->name, op_ret ? GF_LOG_ERROR : GF_LOG_INFO,
+ op_ret ? op_errno : 0, AFR_MSG_SET_PEND_XATTR, "name=%s",
+ priv->children[i]->name, "op_ret=%s",
+ op_ret ? "failed" : "succeeded", NULL);
- /* It is sufficient if xattrop was successful on one child */
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
-
- if (local->replies[i].op_ret == 0) {
- ret = 0;
- goto out;
- } else {
- ret = afr_higher_errno (ret,
- local->replies[i].op_errno);
- }
- }
out:
- return -ret;
+ syncbarrier_wake(&local->barrier);
+ return 0;
}
int
-_afr_handle_empty_brick_type (xlator_t *this, call_frame_t *frame,
- loc_t *loc, int empty_index,
- afr_transaction_type type,
- char *op_type)
+afr_emptyb_set_pending_changelog(call_frame_t *frame, xlator_t *this,
+ unsigned char *locked_nodes)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- unsigned char *locked_nodes = NULL;
- int count = 0;
- int ret = -ENOMEM;
- int idx = -1;
-
- priv = this->private;
- local = frame->local;
-
- locked_nodes = alloca0 (priv->child_count);
-
- idx = afr_index_for_transaction_type (type);
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int ret = 0, i = 0;
- local->pending = afr_matrix_create (priv->child_count,
- AFR_NUM_CHANGE_LOGS);
- if (!local->pending)
- goto out;
-
- local->pending[empty_index][idx] = hton32 (1);
-
- local->xdata_req = dict_new ();
- if (!local->xdata_req)
- goto out;
+ local = frame->local;
+ priv = this->private;
- ret = dict_set_str (local->xdata_req, "replicate-brick-op", op_type);
- if (ret)
- goto out;
+ AFR_ONLIST(locked_nodes, frame, afr_emptyb_set_pending_changelog_cbk,
+ xattrop, &local->loc, GF_XATTROP_ADD_ARRAY, local->xattr_req,
+ NULL);
- local->xattr_req = dict_new ();
- if (!local->xattr_req)
- goto out;
+ /* It is sufficient if xattrop was successful on one child */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
- ret = afr_set_pending_dict (priv, local->xattr_req, local->pending);
- if (ret < 0)
- goto out;
-
- if (AFR_ENTRY_TRANSACTION == type) {
- count = afr_selfheal_entrylk (frame, this, loc->inode,
- this->name, NULL, locked_nodes);
+ if (local->replies[i].op_ret == 0) {
+ ret = 0;
+ goto out;
} else {
- count = afr_selfheal_inodelk (frame, this, loc->inode,
- this->name, LLONG_MAX - 1, 0,
- locked_nodes);
- }
-
- if (!count) {
- gf_msg (this->name, GF_LOG_ERROR, EAGAIN,
- AFR_MSG_REPLACE_BRICK_STATUS, "Couldn't acquire lock on"
- " any child.");
- ret = -EAGAIN;
- goto unlock;
+ ret = afr_higher_errno(ret, local->replies[i].op_errno);
}
+ }
+out:
+ return -ret;
+}
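afr_emptyb_set_pending_changelog treats the xattrop as successful if any brick succeeded; only when all locked bricks fail does it pick an errno across the failures (via afr_higher_errno in the real code) and return it negated. A simplified fold, using a plain numeric maximum in place of the real errno ranking:

```
/* Hedged sketch of the reply aggregation above; valid/op_ret/op_errno
 * stand in for local->replies[], and the "higher errno" ranking is
 * simplified to a maximum. */
static int
fold_xattrop_replies(int n, const int *valid, const int *op_ret,
                     const int *op_errno)
{
    int err = 0;
    int i;

    for (i = 0; i < n; i++) {
        if (!valid[i])
            continue;
        if (op_ret[i] == 0)
            return 0; /* one success is enough */
        if (op_errno[i] > err)
            err = op_errno[i];
    }
    return -err;
}
```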
- ret = afr_emptyb_set_pending_changelog (frame, this, locked_nodes);
- if (ret)
- goto unlock;
- ret = 0;
+static int
+_afr_handle_empty_brick_type(xlator_t *this, call_frame_t *frame, loc_t *loc,
+ int empty_index, afr_transaction_type type,
+ char *op_type, const int op_type_len)
+{
+ int count = 0;
+ int ret = -ENOMEM;
+ int idx = -1;
+ int d_idx = -1;
+ unsigned char *locked_nodes = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ locked_nodes = alloca0(priv->child_count);
+
+ idx = afr_index_for_transaction_type(type);
+ d_idx = afr_index_for_transaction_type(AFR_DATA_TRANSACTION);
+
+ local->pending = afr_matrix_create(priv->child_count, AFR_NUM_CHANGE_LOGS);
+ if (!local->pending)
+ goto out;
+
+ local->pending[empty_index][idx] = hton32(1);
+
+ if ((priv->esh_granular) && (type == AFR_ENTRY_TRANSACTION))
+ local->pending[empty_index][d_idx] = hton32(1);
+
+ local->xdata_req = dict_new();
+ if (!local->xdata_req)
+ goto out;
+
+ ret = dict_set_nstrn(local->xdata_req, "replicate-brick-op",
+ SLEN("replicate-brick-op"), op_type, op_type_len);
+ if (ret)
+ goto out;
+
+ local->xattr_req = dict_new();
+ if (!local->xattr_req)
+ goto out;
+
+ ret = afr_set_pending_dict(priv, local->xattr_req, local->pending);
+ if (ret < 0)
+ goto out;
+
+ if (AFR_ENTRY_TRANSACTION == type) {
+ count = afr_selfheal_entrylk(frame, this, loc->inode, this->name, NULL,
+ locked_nodes);
+ } else {
+ count = afr_selfheal_inodelk(frame, this, loc->inode, this->name,
+ LLONG_MAX - 1, 0, locked_nodes);
+ }
+
+ if (!count) {
+ gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, AFR_MSG_REPLACE_BRICK_STATUS,
+ NULL);
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = afr_emptyb_set_pending_changelog(frame, this, locked_nodes);
+ if (ret)
+ goto unlock;
+ ret = 0;
unlock:
- if (AFR_ENTRY_TRANSACTION == type) {
- afr_selfheal_unentrylk (frame, this, loc->inode, this->name,
- NULL, locked_nodes);
- } else {
- afr_selfheal_uninodelk (frame, this, loc->inode, this->name,
- LLONG_MAX - 1, 0, locked_nodes);
- }
+ if (AFR_ENTRY_TRANSACTION == type) {
+ afr_selfheal_unentrylk(frame, this, loc->inode, this->name, NULL,
+ locked_nodes, NULL);
+ } else {
+ afr_selfheal_uninodelk(frame, this, loc->inode, this->name,
+ LLONG_MAX - 1, 0, locked_nodes);
+ }
out:
- return ret;
+ return ret;
}
void
-afr_brick_args_cleanup (void *opaque)
-{
- afr_empty_brick_args_t *data = NULL;
-
- data = opaque;
- loc_wipe (&data->loc);
- GF_FREE (data);
-}
-
-int
-_afr_handle_empty_brick_cbk (int ret, call_frame_t *frame, void *opaque)
-{
- afr_brick_args_cleanup (opaque);
- return 0;
-}
-
-int
-_afr_handle_empty_brick (void *opaque)
-{
-
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int empty_index = -1;
- int ret = -1;
- int op_errno = ENOMEM;
- call_frame_t *frame = NULL;
- xlator_t *this = NULL;
- char *op_type = NULL;
- afr_empty_brick_args_t *data = NULL;
-
- data = opaque;
- frame = data->frame;
- empty_index = data->empty_index;
- op_type = data->op_type;
- this = frame->this;
- priv = this->private;
-
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
-
- loc_copy (&local->loc, &data->loc);
-
- gf_msg_debug (this->name, 0, "New brick is : %s",
- priv->children[empty_index]->name);
-
- ret = _afr_handle_empty_brick_type (this, frame, &local->loc, empty_index,
- AFR_METADATA_TRANSACTION, op_type);
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
-
- dict_unref (local->xdata_req);
- dict_unref (local->xattr_req);
- afr_matrix_cleanup (local->pending, priv->child_count);
- local->pending = NULL;
- local->xattr_req = NULL;
- local->xdata_req = NULL;
-
- ret = _afr_handle_empty_brick_type (this, frame, &local->loc, empty_index,
- AFR_ENTRY_TRANSACTION, op_type);
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
- ret = 0;
+afr_brick_args_cleanup(void *opaque)
+{
+ afr_empty_brick_args_t *data = NULL;
+
+ data = opaque;
+ loc_wipe(&data->loc);
+ GF_FREE(data);
+}
+
+int
+_afr_handle_empty_brick_cbk(int ret, call_frame_t *frame, void *opaque)
+{
+ afr_brick_args_cleanup(opaque);
+ return 0;
+}
+
+int
+_afr_handle_empty_brick(void *opaque)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int empty_index = -1;
+ int ret = -1;
+ int op_errno = ENOMEM;
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ char *op_type = NULL;
+ int op_type_len = 0;
+ afr_empty_brick_args_t *data = NULL;
+ call_frame_t *op_frame = NULL;
+
+ data = opaque;
+ frame = data->frame;
+ empty_index = data->empty_index;
+ if (!data->op_type)
+ goto out;
+
+ op_frame = copy_frame(frame);
+ if (!op_frame) {
+ ret = -1;
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ op_type = data->op_type;
+ op_type_len = strlen(op_type);
+ this = op_frame->this;
+ priv = this->private;
+
+ afr_set_lk_owner(op_frame, this, op_frame->root);
+ local = AFR_FRAME_INIT(op_frame, op_errno);
+ if (!local)
+ goto out;
+
+ loc_copy(&local->loc, &data->loc);
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, AFR_MSG_NEW_BRICK, "name=%s",
+ priv->children[empty_index]->name, NULL);
+
+ ret = _afr_handle_empty_brick_type(this, op_frame, &local->loc, empty_index,
+ AFR_METADATA_TRANSACTION, op_type,
+ op_type_len);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+
+ dict_unref(local->xdata_req);
+ dict_unref(local->xattr_req);
+ afr_matrix_cleanup(local->pending, priv->child_count);
+ local->pending = NULL;
+ local->xattr_req = NULL;
+ local->xdata_req = NULL;
+
+ ret = _afr_handle_empty_brick_type(this, op_frame, &local->loc, empty_index,
+ AFR_ENTRY_TRANSACTION, op_type,
+ op_type_len);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
out:
- AFR_STACK_UNWIND (setxattr, frame, ret, op_errno, NULL);
- return 0;
+ if (op_frame) {
+ AFR_STACK_DESTROY(op_frame);
+ }
+ AFR_STACK_UNWIND(setxattr, frame, ret, op_errno, NULL);
+ return 0;
+}
+
+int
+afr_split_brain_resolve_do(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ char *data)
+{
+ afr_local_t *local = NULL;
+ int ret = -1;
+ int op_errno = EINVAL;
+
+ local = frame->local;
+ local->xdata_req = dict_new();
+
+ if (!local->xdata_req) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_set_int32_sizen(local->xdata_req, "heal-op",
+ GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_str_sizen(local->xdata_req, "child-name", data);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+ /* set spb choice to -1 whether heal succeeds or not:
+ * If heal succeeds : spb-choice should be set to -1 as
+ * it is no longer valid; file is not
+ * in split-brain anymore.
+ * If heal doesn't succeed:
+ * spb-choice should be set to -1
+ * otherwise reads will be served
+ * from spb-choice which is misleading.
+ */
+ ret = afr_inode_split_brain_choice_set(loc->inode, this, -1);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN_SET_FAILED,
+ NULL);
+ afr_heal_splitbrain_file(frame, this, loc);
+ ret = 0;
+out:
+ if (ret < 0)
+ AFR_STACK_UNWIND(setxattr, frame, -1, op_errno, NULL);
+ return 0;
}
-
int
-afr_split_brain_resolve_do (call_frame_t *frame, xlator_t *this, loc_t *loc,
- char *data)
+afr_get_split_brain_child_index(xlator_t *this, void *value, size_t len)
{
- afr_local_t *local = NULL;
- int ret = -1;
- int op_errno = EINVAL;
+ int spb_child_index = -1;
+ char *spb_child_str = NULL;
- local = frame->local;
- local->xdata_req = dict_new ();
+ spb_child_str = alloca0(len + 1);
+ memcpy(spb_child_str, value, len);
- if (!local->xdata_req) {
- op_errno = ENOMEM;
- goto out;
- }
+ if (!strcmp(spb_child_str, "none"))
+ return -2;
- ret = dict_set_int32 (local->xdata_req, "heal-op",
- GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK);
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
- ret = dict_set_str (local->xdata_req, "child-name", data);
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
- /* set spb choice to -1 whether heal succeeds or not:
- * If heal succeeds : spb-choice should be set to -1 as
- * it is no longer valid; file is not
- * in split-brain anymore.
- * If heal doesn't succeed:
- * spb-choice should be set to -1
- * otherwise reads will be served
- * from spb-choice which is misleading.
- */
- ret = afr_inode_split_brain_choice_set (loc->inode, this, -1);
- if (ret)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR, "Failed to set"
- "split-brain choice to -1");
- afr_heal_splitbrain_file (frame, this, loc);
- ret = 0;
-out:
- if (ret < 0)
- AFR_STACK_UNWIND (setxattr, frame, -1, op_errno, NULL);
- return 0;
+ spb_child_index = afr_get_child_index_from_name(this, spb_child_str);
+ if (spb_child_index < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_SUBVOL,
+ "subvol=%s", spb_child_str, NULL);
+ }
+ return spb_child_index;
}
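
Xattr values arrive as a pointer plus length and are not guaranteed to be NUL-terminated, which is why the function above copies the value into a zeroed `len + 1` byte buffer before calling `strcmp()`. The same idea in standalone C, using a heap buffer instead of the stack-allocating `alloca0()` macro (an assumption made here purely for portability):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a freshly allocated, NUL-terminated copy of a length-delimited
 * value so it can be used safely with the str* functions. */
static char *terminate_value(const void *value, size_t len)
{
    char *s = calloc(1, len + 1); /* the +1 keeps the trailing NUL */
    if (!s)
        return NULL;
    memcpy(s, value, len);
    return s;
}

int main(void)
{
    const char raw[4] = {'n', 'o', 'n', 'e'}; /* no NUL terminator */
    char *s = terminate_value(raw, sizeof(raw));
    if (!s)
        return 1;
    if (strcmp(s, "none") == 0)
        printf("special value: clear the split-brain choice\n");
    free(s);
    return 0;
}
```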
int
-afr_get_split_brain_child_index (xlator_t *this, void *value, size_t len)
+afr_can_set_split_brain_choice(void *opaque)
{
- int spb_child_index = -1;
- char *spb_child_str = NULL;
+ afr_spbc_timeout_t *data = opaque;
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ loc_t *loc = NULL;
+ int ret = -1;
- spb_child_str = alloca0 (len + 1);
- memcpy (spb_child_str, value, len);
+ frame = data->frame;
+ loc = data->loc;
+ this = frame->this;
- if (!strcmp (spb_child_str, "none"))
- return -2;
+ ret = afr_is_split_brain(frame, this, loc->inode, loc->gfid, &data->d_spb,
+ &data->m_spb);
- spb_child_index = afr_get_child_index_from_name (this,
- spb_child_str);
- if (spb_child_index < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_SUBVOL, "Invalid subvol: %s",
- spb_child_str);
- }
- return spb_child_index;
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ AFR_MSG_SPLIT_BRAIN_DETERMINE_FAILED, "gfid=%s",
+ uuid_utoa(loc->gfid), NULL);
+ return ret;
}
int
-afr_can_set_split_brain_choice (void *opaque)
+afr_handle_split_brain_commands(xlator_t *this, call_frame_t *frame, loc_t *loc,
+ dict_t *dict)
{
- afr_spbc_timeout_t *data = opaque;
- call_frame_t *frame = NULL;
- xlator_t *this = NULL;
- loc_t *loc = NULL;
- int ret = -1;
-
- frame = data->frame;
- loc = data->loc;
- this = frame->this;
+ void *choice_value = NULL;
+ void *resolve_value = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_spbc_timeout_t *data = NULL;
+ int len = 0;
+ int spb_child_index = -1;
+ int ret = -1;
+ int op_errno = EINVAL;
- ret = afr_is_split_brain (frame, this, loc->inode, loc->gfid,
- &data->d_spb, &data->m_spb);
+ priv = this->private;
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
- "Failed to determine if %s"
- " is in split-brain. "
- "Aborting split-brain-choice set.",
- uuid_utoa (loc->gfid));
- return ret;
-}
+ ret = dict_get_ptr_and_len(dict, GF_AFR_SBRAIN_CHOICE, &choice_value, &len);
+ ret = dict_get_ptr_and_len(dict, GF_AFR_SBRAIN_RESOLVE, &resolve_value,
+ &len);
+ if (!choice_value && !resolve_value) {
+ ret = -1;
+ goto out;
+ }
-int
-afr_handle_split_brain_commands (xlator_t *this, call_frame_t *frame,
- loc_t *loc, dict_t *dict)
-{
- void *value = NULL;
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- afr_spbc_timeout_t *data = NULL;
- int len = 0;
- int spb_child_index = -1;
- int ret = -1;
- int op_errno = EINVAL;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local) {
+ ret = 1;
+ goto out;
+ }
- priv = this->private;
+ local->op = GF_FOP_SETXATTR;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local) {
+ if (choice_value) {
+ spb_child_index = afr_get_split_brain_child_index(this, choice_value,
+ len);
+ if (spb_child_index < 0) {
+ /* Case where value was "none" */
+ if (spb_child_index == -2)
+ spb_child_index = -1;
+ else {
ret = 1;
+ op_errno = EINVAL;
goto out;
+ }
}
- local->op = GF_FOP_SETXATTR;
-
- ret = dict_get_ptr_and_len (dict, GF_AFR_SBRAIN_CHOICE, &value,
- &len);
- if (value) {
- spb_child_index = afr_get_split_brain_child_index (this, value,
- len);
- if (spb_child_index < 0) {
- /* Case where value was "none" */
- if (spb_child_index == -2)
- spb_child_index = -1;
- else {
- ret = 1;
- op_errno = EINVAL;
- goto out;
- }
- }
-
- data = GF_CALLOC (1, sizeof (*data), gf_afr_mt_spbc_timeout_t);
- if (!data) {
- ret = 1;
- goto out;
- }
- data->spb_child_index = spb_child_index;
- data->frame = frame;
- data->loc = loc;
- ret = synctask_new (this->ctx->env,
- afr_can_set_split_brain_choice,
- afr_set_split_brain_choice, NULL, data);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR,
- "Failed to create"
- " synctask. Aborting split-brain choice set"
- " for %s", loc->name);
- ret = 1;
- op_errno = ENOMEM;
- goto out;
- }
- ret = 0;
- goto out;
+ data = GF_CALLOC(1, sizeof(*data), gf_afr_mt_spbc_timeout_t);
+ if (!data) {
+ ret = 1;
+ goto out;
+ }
+ data->spb_child_index = spb_child_index;
+ data->frame = frame;
+ loc_copy(&local->loc, loc);
+ data->loc = &local->loc;
+ ret = synctask_new(this->ctx->env, afr_can_set_split_brain_choice,
+ afr_set_split_brain_choice, NULL, data);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN_STATUS,
+ "name=%s", loc->name, NULL);
+ ret = 1;
+ op_errno = ENOMEM;
+ goto out;
}
+ ret = 0;
+ goto out;
+ }
- ret = dict_get_ptr_and_len (dict, GF_AFR_SBRAIN_RESOLVE, &value, &len);
- if (value) {
- spb_child_index = afr_get_split_brain_child_index (this, value,
- len);
- if (spb_child_index < 0) {
- ret = 1;
- goto out;
- }
-
- afr_split_brain_resolve_do (frame, this, loc,
- priv->children[spb_child_index]->name);
- ret = 0;
+ if (resolve_value) {
+ spb_child_index = afr_get_split_brain_child_index(this, resolve_value,
+ len);
+ if (spb_child_index < 0) {
+ ret = 1;
+ goto out;
}
+
+ afr_split_brain_resolve_do(frame, this, loc,
+ priv->children[spb_child_index]->name);
+ ret = 0;
+ }
out:
- /* key was correct but value was invalid when ret == 1 */
- if (ret == 1) {
- AFR_STACK_UNWIND (setxattr, frame, -1, op_errno, NULL);
- if (data)
- GF_FREE (data);
- ret = 0;
- }
- return ret;
+ /* key was correct but value was invalid when ret == 1 */
+ if (ret == 1) {
+ AFR_STACK_UNWIND(setxattr, frame, -1, op_errno, NULL);
+ if (data)
+ GF_FREE(data);
+ ret = 0;
+ }
+ return ret;
}
int
-afr_handle_spb_choice_timeout (xlator_t *this, call_frame_t *frame,
- dict_t *dict)
+afr_handle_spb_choice_timeout(xlator_t *this, call_frame_t *frame, dict_t *dict)
{
- int ret = -1;
- int op_errno = 0;
- uint64_t timeout = 0;
- afr_private_t *priv = NULL;
+ int ret = -1;
+ int op_errno = 0;
+ uint64_t timeout = 0;
+ afr_private_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- ret = dict_get_uint64 (dict, GF_AFR_SPB_CHOICE_TIMEOUT, &timeout);
- if (!ret) {
- priv->spb_choice_timeout = timeout * 60;
- AFR_STACK_UNWIND (setxattr, frame, ret, op_errno, NULL);
- }
+ ret = dict_get_uint64(dict, GF_AFR_SPB_CHOICE_TIMEOUT, &timeout);
+ if (!ret) {
+ priv->spb_choice_timeout = timeout * 60;
+ AFR_STACK_UNWIND(setxattr, frame, ret, op_errno, NULL);
+ }
- return ret;
+ return ret;
}
int
-afr_handle_empty_brick (xlator_t *this, call_frame_t *frame, loc_t *loc,
- dict_t *dict)
+afr_handle_empty_brick(xlator_t *this, call_frame_t *frame, loc_t *loc,
+ dict_t *dict)
{
- int ret = -1;
- int ab_ret = -1;
- int empty_index = -1;
- int op_errno = EPERM;
- char *empty_brick = NULL;
- char *op_type = NULL;
- afr_empty_brick_args_t *data = NULL;
+ int ret = -1;
+ int ab_ret = -1;
+ int empty_index = -1;
+ int op_errno = EPERM;
+ char *empty_brick = NULL;
+ char *op_type = NULL;
+ afr_empty_brick_args_t *data = NULL;
- ret = dict_get_str (dict, GF_AFR_REPLACE_BRICK, &empty_brick);
- if (!ret)
- op_type = GF_AFR_REPLACE_BRICK;
+ ret = dict_get_str_sizen(dict, GF_AFR_REPLACE_BRICK, &empty_brick);
+ if (!ret)
+ op_type = GF_AFR_REPLACE_BRICK;
- ab_ret = dict_get_str (dict, GF_AFR_ADD_BRICK, &empty_brick);
- if (!ab_ret)
- op_type = GF_AFR_ADD_BRICK;
+ ab_ret = dict_get_str_sizen(dict, GF_AFR_ADD_BRICK, &empty_brick);
+ if (!ab_ret)
+ op_type = GF_AFR_ADD_BRICK;
- if (ret && ab_ret)
- goto out;
+ if (ret && ab_ret)
+ goto out;
- if (frame->root->pid != GF_CLIENT_PID_SELF_HEALD) {
- gf_msg (this->name, GF_LOG_ERROR, EPERM,
- afr_get_msg_id (op_type),
- "'%s' is an internal extended attribute.",
- op_type);
- ret = 1;
- goto out;
+ if (frame->root->pid != GF_CLIENT_PID_ADD_REPLICA_MOUNT) {
+ gf_smsg(this->name, GF_LOG_ERROR, EPERM, AFR_MSG_INTERNAL_ATTR,
+ "op_type=%s", op_type, NULL);
+ ret = 1;
+ goto out;
+ }
+ empty_index = afr_get_child_index_from_name(this, empty_brick);
+
+ if (empty_index < 0) {
+ /* Didn't belong to this replica pair
+ * Just do a no-op
+ */
+ AFR_STACK_UNWIND(setxattr, frame, 0, 0, NULL);
+ return 0;
+ } else {
+ data = GF_CALLOC(1, sizeof(*data), gf_afr_mt_empty_brick_t);
+ if (!data) {
+ ret = 1;
+ op_errno = ENOMEM;
+ goto out;
}
- empty_index = afr_get_child_index_from_name (this, empty_brick);
-
- if (empty_index < 0) {
- /* Didn't belong to this replica pair
- * Just do a no-op
- */
- AFR_STACK_UNWIND (setxattr, frame, 0, 0, NULL);
- return 0;
- } else {
- data = GF_CALLOC (1, sizeof (*data),
- gf_afr_mt_empty_brick_t);
- if (!data) {
- ret = 1;
- op_errno = ENOMEM;
- goto out;
- }
- data->frame = frame;
- loc_copy (&data->loc, loc);
- data->empty_index = empty_index;
- data->op_type = op_type;
- ret = synctask_new (this->ctx->env,
- _afr_handle_empty_brick,
- _afr_handle_empty_brick_cbk,
- NULL, data);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- afr_get_msg_id (op_type),
- "Failed to create synctask.");
- ret = 1;
- op_errno = ENOMEM;
- afr_brick_args_cleanup (data);
- goto out;
- }
+ data->frame = frame;
+ loc_copy(&data->loc, loc);
+ data->empty_index = empty_index;
+ data->op_type = op_type;
+ ret = synctask_new(this->ctx->env, _afr_handle_empty_brick,
+ _afr_handle_empty_brick_cbk, NULL, data);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN_STATUS,
+ NULL);
+ ret = 1;
+ op_errno = ENOMEM;
+ afr_brick_args_cleanup(data);
+ goto out;
}
- ret = 0;
+ }
+ ret = 0;
out:
- if (ret == 1) {
- AFR_STACK_UNWIND (setxattr, frame, -1, op_errno, NULL);
- ret = 0;
- }
- return ret;
+ if (ret == 1) {
+ AFR_STACK_UNWIND(setxattr, frame, -1, op_errno, NULL);
+ ret = 0;
+ }
+ return ret;
}
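
`afr_handle_empty_brick()` packages its arguments and hands the actual work to a synctask so the setxattr call path is not blocked: `synctask_new()` is given a worker (`_afr_handle_empty_brick`) and a completion callback (`_afr_handle_empty_brick_cbk`) whose only job is to free the packaged arguments. The shape of that fn/cbk split, sketched with a plain pthread purely as an analogy — this is not the synctask API, and the struct and function names below are placeholders:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct brick_args {
    char brick[64];
    int (*fn)(void *opaque);           /* the worker */
    int (*cbk)(int ret, void *opaque); /* runs after fn, frees the args */
};

static int handle_empty_brick(void *opaque)
{
    struct brick_args *args = opaque;
    printf("setting pending changelogs for %s\n", args->brick);
    return 0;
}

static int handle_empty_brick_cbk(int ret, void *opaque)
{
    (void)ret;
    free(opaque); /* mirrors afr_brick_args_cleanup() */
    return 0;
}

static void *task_runner(void *opaque)
{
    struct brick_args *args = opaque;
    int ret = args->fn(opaque);
    args->cbk(ret, opaque);
    return NULL;
}

int main(void)
{
    struct brick_args *args = calloc(1, sizeof(*args));
    if (!args)
        return 1;
    strncpy(args->brick, "replica-0-client-2", sizeof(args->brick) - 1);
    args->fn = handle_empty_brick;
    args->cbk = handle_empty_brick_cbk;

    pthread_t t;
    if (pthread_create(&t, NULL, task_runner, args) != 0)
        return 1;
    pthread_join(t, NULL); /* a real synctask returns to the caller instead */
    return 0;
}
```

(Compile with `-lpthread`; the join is only there to keep the example deterministic.)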
static int
-afr_handle_special_xattr (xlator_t *this, call_frame_t *frame, loc_t *loc,
- dict_t *dict)
+afr_handle_special_xattr(xlator_t *this, call_frame_t *frame, loc_t *loc,
+ dict_t *dict)
{
- int ret = -1;
+ int ret = -1;
- ret = afr_handle_split_brain_commands (this, frame, loc, dict);
- if (ret == 0)
- goto out;
+ ret = afr_handle_split_brain_commands(this, frame, loc, dict);
+ if (ret == 0)
+ goto out;
- ret = afr_handle_spb_choice_timeout (this, frame, dict);
- if (ret == 0)
- goto out;
+ ret = afr_handle_spb_choice_timeout(this, frame, dict);
+ if (ret == 0)
+ goto out;
- /* Applicable for replace-brick and add-brick commands */
- ret = afr_handle_empty_brick (this, frame, loc, dict);
+ /* Applicable for replace-brick and add-brick commands */
+ ret = afr_handle_empty_brick(this, frame, loc, dict);
out:
- return ret;
+ return ret;
}
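
`afr_handle_special_xattr()` simply tries each special-xattr handler in turn and treats a return value of 0 as "this handler consumed the request"; anything else falls through to the normal setxattr transaction. A tiny standalone rendering of that dispatch convention — the handler bodies below are placeholders, not the real key checks:

```c
#include <stdio.h>

/* Each handler returns 0 if it recognised and consumed the request,
 * non-zero to let the next handler have a look. */
static int handle_split_brain(const char *key) { return key[0] == 's' ? 0 : -1; }
static int handle_timeout(const char *key)     { return key[0] == 't' ? 0 : -1; }
static int handle_empty_brick(const char *key) { return key[0] == 'e' ? 0 : -1; }

static int handle_special_xattr(const char *key)
{
    int ret;

    ret = handle_split_brain(key);
    if (ret == 0)
        goto out;

    ret = handle_timeout(key);
    if (ret == 0)
        goto out;

    ret = handle_empty_brick(key);
out:
    return ret; /* non-zero: fall through to the normal setxattr path */
}

int main(void)
{
    printf("'timeout' consumed? %s\n",
           handle_special_xattr("timeout") == 0 ? "yes" : "no");
    printf("'user.foo' consumed? %s\n",
           handle_special_xattr("user.foo") == 0 ? "yes" : "no");
    return 0;
}
```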
int
-afr_setxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
- int32_t flags, dict_t *xdata)
+afr_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = EINVAL;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = EINVAL;
- GF_IF_INTERNAL_XATTR_GOTO ("trusted.afr.*", dict,
- op_errno, out);
+ GF_IF_INTERNAL_XATTR_GOTO("trusted.afr.*", dict, op_errno, out);
- GF_IF_INTERNAL_XATTR_GOTO ("trusted.glusterfs.afr.*", dict,
- op_errno, out);
+ GF_IF_INTERNAL_XATTR_GOTO("trusted.glusterfs.afr.*", dict, op_errno, out);
- ret = afr_handle_special_xattr (this, frame, loc, dict);
- if (ret == 0)
- return 0;
+ ret = afr_handle_special_xattr(this, frame, loc, dict);
+ if (ret == 0)
+ return 0;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.setxattr.dict = dict_ref (dict);
- local->cont.setxattr.flags = flags;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ local->cont.setxattr.dict = dict_ref(dict);
+ local->cont.setxattr.flags = flags;
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->transaction.wind = afr_setxattr_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_setxattr_unwind;
+ local->transaction.wind = afr_setxattr_wind;
+ local->transaction.unwind = afr_setxattr_unwind;
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
+ loc_copy(&local->loc, loc);
+ ret = afr_set_inode_local(this, local, loc->inode);
+ if (ret)
+ goto out;
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- local->op = GF_FOP_SETXATTR;
+ local->op = GF_FOP_SETXATTR;
- ret = afr_transaction (transaction_frame, this, AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (setxattr, frame, -1, op_errno, NULL);
+ AFR_STACK_UNWIND(setxattr, frame, -1, op_errno, NULL);
- return 0;
+ return 0;
}
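
`afr_setxattr()` and every write fop that follows share the same skeleton: copy the frame, initialise the AFR local, point `transaction.wind`/`transaction.unwind` at the fop-specific helpers, and on any setup failure destroy the copied frame and unwind the original frame with `-1`/`op_errno`. A compressed, standalone rendering of just that control flow, with stub types standing in for `call_frame_t`/`afr_local_t` and stub helpers for `copy_frame`/`AFR_STACK_DESTROY`/`afr_transaction` (assumptions throughout, kept only to make the error-handling shape visible):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct local;

struct frame {
    struct local *local;
};

struct local {
    int (*wind)(struct frame *f);
    int (*unwind)(struct frame *f);
};

static struct frame *copy_frame(struct frame *f)
{
    (void)f;
    return calloc(1, sizeof(struct frame));
}

static void destroy_frame(struct frame *f)
{
    if (f) {
        free(f->local);
        free(f);
    }
}

static int start_transaction(struct frame *txn)
{
    /* the real afr_transaction() takes ownership of txn from here on;
     * the sketch therefore leaks it on the success path on purpose */
    (void)txn;
    printf("transaction started on the copied frame\n");
    return 0;
}

static int my_wind(struct frame *f)   { (void)f; return 0; }
static int my_unwind(struct frame *f) { (void)f; return 0; }

/* Mirrors the shape of afr_setxattr(): success hands off to the
 * transaction, any failure unwinds the *original* frame. */
static int fop(struct frame *frame)
{
    struct frame *txn = NULL;
    int ret = -1;
    int op_errno = ENOMEM;

    txn = copy_frame(frame);
    if (!txn)
        goto out;

    txn->local = calloc(1, sizeof(*txn->local));
    if (!txn->local)
        goto out;

    txn->local->wind = my_wind;
    txn->local->unwind = my_unwind;

    ret = start_transaction(txn);
    if (ret < 0) {
        op_errno = -ret;
        goto out;
    }
    return 0;

out:
    destroy_frame(txn);
    printf("unwinding original frame with op_ret=-1 op_errno=%d\n", op_errno);
    return 0;
}

int main(void)
{
    struct frame original = {0};
    return fop(&original);
}
```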
/* {{{ fsetxattr */
-
int
-afr_fsetxattr_unwind (call_frame_t *frame, xlator_t *this)
+afr_fsetxattr_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (fsetxattr, main_frame, local->op_ret, local->op_errno,
- local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(fsetxattr, main_frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return 0;
+}
int
-afr_fsetxattr_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_fsetxattr_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- NULL, NULL, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ NULL, NULL, xdata);
}
-
int
-afr_fsetxattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_fsetxattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_fsetxattr_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->fsetxattr,
- local->fd, local->cont.fsetxattr.dict,
- local->cont.fsetxattr.flags, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_fsetxattr_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->fsetxattr, local->fd,
+ local->cont.fsetxattr.dict, local->cont.fsetxattr.flags,
+ local->xdata_req);
+ return 0;
}
-
int
-afr_fsetxattr (call_frame_t *frame, xlator_t *this,
- fd_t *fd, dict_t *dict, int32_t flags, dict_t *xdata)
+afr_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- GF_IF_INTERNAL_XATTR_GOTO ("trusted.afr.*", dict,
- op_errno, out);
+ GF_IF_INTERNAL_XATTR_GOTO("trusted.afr.*", dict, op_errno, out);
- GF_IF_INTERNAL_XATTR_GOTO ("trusted.glusterfs.afr.*", dict,
- op_errno, out);
+ GF_IF_INTERNAL_XATTR_GOTO("trusted.glusterfs.afr.*", dict, op_errno, out);
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.fsetxattr.dict = dict_ref (dict);
- local->cont.fsetxattr.flags = flags;
+ local->cont.fsetxattr.dict = dict_ref(dict);
+ local->cont.fsetxattr.flags = flags;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->transaction.wind = afr_fsetxattr_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_fsetxattr_unwind;
+ local->transaction.wind = afr_fsetxattr_wind;
+ local->transaction.unwind = afr_fsetxattr_unwind;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_FSETXATTR;
+ local->op = GF_FOP_FSETXATTR;
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- ret = afr_transaction (transaction_frame, this, AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (fsetxattr, frame, -1, op_errno, NULL);
- return 0;
+ AFR_STACK_UNWIND(fsetxattr, frame, -1, op_errno, NULL);
+ return 0;
}
/* }}} */
-
/* {{{ removexattr */
-
int
-afr_removexattr_unwind (call_frame_t *frame, xlator_t *this)
+afr_removexattr_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
-
- local = frame->local;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (removexattr, main_frame, local->op_ret, local->op_errno,
- local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(removexattr, main_frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return 0;
+}
int
-afr_removexattr_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_removexattr_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- NULL, NULL, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ NULL, NULL, xdata);
}
-
int
-afr_removexattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_removexattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_removexattr_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->removexattr,
- &local->loc, local->cont.removexattr.name,
- local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_removexattr_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->removexattr, &local->loc,
+ local->cont.removexattr.name, local->xdata_req);
+ return 0;
}
-
int
-afr_removexattr (call_frame_t *frame, xlator_t *this,
- loc_t *loc, const char *name, dict_t *xdata)
+afr_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- GF_IF_NATIVE_XATTR_GOTO ("trusted.afr.*",
- name, op_errno, out);
+ GF_IF_NATIVE_XATTR_GOTO("trusted.afr.*", name, op_errno, out);
- GF_IF_NATIVE_XATTR_GOTO ("trusted.glusterfs.afr.*",
- name, op_errno, out);
+ GF_IF_NATIVE_XATTR_GOTO("trusted.glusterfs.afr.*", name, op_errno, out);
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.removexattr.name = gf_strdup (name);
+ local->cont.removexattr.name = gf_strdup(name);
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->transaction.wind = afr_removexattr_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_removexattr_unwind;
+ local->transaction.wind = afr_removexattr_wind;
+ local->transaction.unwind = afr_removexattr_unwind;
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
+ loc_copy(&local->loc, loc);
+ ret = afr_set_inode_local(this, local, loc->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_REMOVEXATTR;
+ local->op = GF_FOP_REMOVEXATTR;
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- ret = afr_transaction (transaction_frame, this, AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (removexattr, frame, -1, op_errno, NULL);
- return 0;
+ AFR_STACK_UNWIND(removexattr, frame, -1, op_errno, NULL);
+ return 0;
}
/* ffremovexattr */
int
-afr_fremovexattr_unwind (call_frame_t *frame, xlator_t *this)
+afr_fremovexattr_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (fremovexattr, main_frame, local->op_ret, local->op_errno,
- local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(fremovexattr, main_frame, local->op_ret, local->op_errno,
+ local->xdata_rsp);
+ return 0;
+}
int
-afr_fremovexattr_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+afr_fremovexattr_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- NULL, NULL, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ NULL, NULL, xdata);
}
-
int
-afr_fremovexattr_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_fremovexattr_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_fremovexattr_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->fremovexattr,
- local->fd, local->cont.removexattr.name,
- local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_fremovexattr_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->fremovexattr, local->fd,
+ local->cont.removexattr.name, local->xdata_req);
+ return 0;
}
-
int
-afr_fremovexattr (call_frame_t *frame, xlator_t *this, fd_t *fd,
- const char *name, dict_t *xdata)
+afr_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- GF_IF_NATIVE_XATTR_GOTO ("trusted.afr.*",
- name, op_errno, out);
+ GF_IF_NATIVE_XATTR_GOTO("trusted.afr.*", name, op_errno, out);
- GF_IF_NATIVE_XATTR_GOTO ("trusted.glusterfs.afr.*",
- name, op_errno, out);
+ GF_IF_NATIVE_XATTR_GOTO("trusted.glusterfs.afr.*", name, op_errno, out);
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.removexattr.name = gf_strdup (name);
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ local->cont.removexattr.name = gf_strdup(name);
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->transaction.wind = afr_fremovexattr_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_fremovexattr_unwind;
+ local->transaction.wind = afr_fremovexattr_wind;
+ local->transaction.unwind = afr_fremovexattr_unwind;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_FREMOVEXATTR;
+ local->op = GF_FOP_FREMOVEXATTR;
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- ret = afr_transaction (transaction_frame, this, AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (fremovexattr, frame, -1, op_errno, NULL);
+ AFR_STACK_UNWIND(fremovexattr, frame, -1, op_errno, NULL);
- return 0;
+ return 0;
}
-
int
-afr_fallocate_unwind (call_frame_t *frame, xlator_t *this)
+afr_fallocate_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (fallocate, main_frame, local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(fallocate, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ return 0;
+}
int
-afr_fallocate_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
+afr_fallocate_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- prebuf, postbuf, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, prebuf,
+ postbuf, NULL, xdata);
}
-
int
-afr_fallocate_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_fallocate_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_fallocate_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->fallocate,
- local->fd, local->cont.fallocate.mode,
- local->cont.fallocate.offset,
- local->cont.fallocate.len, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_fallocate_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->fallocate, local->fd,
+ local->cont.fallocate.mode, local->cont.fallocate.offset,
+ local->cont.fallocate.len, local->xdata_req);
+ return 0;
}
-
int
-afr_fallocate (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
- off_t offset, size_t len, dict_t *xdata)
+afr_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
+ off_t offset, size_t len, dict_t *xdata)
{
- call_frame_t *transaction_frame = NULL;
- afr_local_t *local = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ call_frame_t *transaction_frame = NULL;
+ afr_local_t *local = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.fallocate.mode = mode;
- local->cont.fallocate.offset = offset;
- local->cont.fallocate.len = len;
+ local->cont.fallocate.mode = mode;
+ local->cont.fallocate.offset = offset;
+ local->cont.fallocate.len = len;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->op = GF_FOP_FALLOCATE;
+ local->op = GF_FOP_FALLOCATE;
- local->transaction.wind = afr_fallocate_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_fallocate_unwind;
+ local->transaction.wind = afr_fallocate_wind;
+ local->transaction.unwind = afr_fallocate_unwind;
- local->transaction.main_frame = frame;
+ local->transaction.main_frame = frame;
- local->transaction.start = local->cont.fallocate.offset;
- local->transaction.len = 0;
+ local->transaction.start = local->cont.fallocate.offset;
+ local->transaction.len = 0;
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- ret = afr_transaction (transaction_frame, this, AFR_DATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_DATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (fallocate, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(fallocate, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
-
/* }}} */
/* {{{ discard */
int
-afr_discard_unwind (call_frame_t *frame, xlator_t *this)
+afr_discard_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (discard, main_frame, local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(discard, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ return 0;
+}
int
-afr_discard_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
+afr_discard_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- prebuf, postbuf, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, prebuf,
+ postbuf, NULL, xdata);
}
-
int
-afr_discard_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_discard_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_discard_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->discard,
- local->fd, local->cont.discard.offset,
- local->cont.discard.len, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_discard_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->discard, local->fd,
+ local->cont.discard.offset, local->cont.discard.len,
+ local->xdata_req);
+ return 0;
}
-
int
-afr_discard (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
- size_t len, dict_t *xdata)
+afr_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.discard.offset = offset;
- local->cont.discard.len = len;
+ local->cont.discard.offset = offset;
+ local->cont.discard.len = len;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->op = GF_FOP_DISCARD;
+ local->op = GF_FOP_DISCARD;
- local->transaction.wind = afr_discard_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_discard_unwind;
+ local->transaction.wind = afr_discard_wind;
+ local->transaction.unwind = afr_discard_unwind;
- local->transaction.main_frame = frame;
+ local->transaction.main_frame = frame;
- local->transaction.start = local->cont.discard.offset;
- local->transaction.len = 0;
+ local->transaction.start = local->cont.discard.offset;
+ local->transaction.len = 0;
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- ret = afr_transaction (transaction_frame, this, AFR_DATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_DATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (discard, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(discard, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
-
/* {{{ zerofill */
int
-afr_zerofill_unwind (call_frame_t *frame, xlator_t *this)
+afr_zerofill_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (discard, main_frame, local->op_ret, local->op_errno,
- &local->cont.inode_wfop.prebuf,
- &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
-}
+ AFR_STACK_UNWIND(discard, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+ return 0;
+}
int
-afr_zerofill_wind_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+afr_zerofill_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
struct iatt *postbuf, dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- prebuf, postbuf, NULL, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, prebuf,
+ postbuf, NULL, xdata);
}
-
int
-afr_zerofill_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_zerofill_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_zerofill_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->zerofill,
- local->fd, local->cont.zerofill.offset,
- local->cont.zerofill.len, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_zerofill_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->zerofill, local->fd,
+ local->cont.zerofill.offset, local->cont.zerofill.len,
+ local->xdata_req);
+ return 0;
}
int
-afr_zerofill (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+afr_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
size_t len, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.zerofill.offset = offset;
- local->cont.zerofill.len = len;
+ local->cont.zerofill.offset = offset;
+ local->cont.zerofill.len = len;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- if (xdata)
- local->xdata_req = dict_copy_with_ref (xdata, NULL);
- else
- local->xdata_req = dict_new ();
+ if (xdata)
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ else
+ local->xdata_req = dict_new();
- if (!local->xdata_req)
- goto out;
+ if (!local->xdata_req)
+ goto out;
- local->op = GF_FOP_ZEROFILL;
+ local->op = GF_FOP_ZEROFILL;
- local->transaction.wind = afr_zerofill_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_zerofill_unwind;
+ local->transaction.wind = afr_zerofill_wind;
+ local->transaction.unwind = afr_zerofill_unwind;
- local->transaction.main_frame = frame;
+ local->transaction.main_frame = frame;
- local->transaction.start = local->cont.discard.offset;
- local->transaction.len = len;
+ local->transaction.start = local->cont.zerofill.offset;
+ local->transaction.len = len;
- afr_fix_open (fd, this);
+ afr_fix_open(fd, this);
- ret = afr_transaction (transaction_frame, this, AFR_DATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_DATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (zerofill, frame, -1, op_errno, NULL, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(zerofill, frame, -1, op_errno, NULL, NULL, NULL);
+ return 0;
}
/* }}} */
int32_t
-afr_xattrop_wind_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *xattr, dict_t *xdata)
+afr_xattrop_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xattr,
+ dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- NULL, NULL, xattr, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ NULL, xattr, xdata);
}
int
-afr_xattrop_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_xattrop_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_xattrop_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->xattrop,
- &local->loc, local->cont.xattrop.optype,
- local->cont.xattrop.xattr, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_xattrop_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->xattrop, &local->loc,
+ local->cont.xattrop.optype, local->cont.xattrop.xattr,
+ local->xdata_req);
+ return 0;
}
int
-afr_xattrop_unwind (call_frame_t *frame, xlator_t *this)
+afr_xattrop_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- call_frame_t *main_frame = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- local = frame->local;
+ local = frame->local;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
-
- AFR_STACK_UNWIND (xattrop, main_frame, local->op_ret, local->op_errno,
- local->xattr_rsp, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
+
+ AFR_STACK_UNWIND(xattrop, main_frame, local->op_ret, local->op_errno,
+ local->xattr_rsp, local->xdata_rsp);
+ return 0;
}
int32_t
-afr_xattrop (call_frame_t *frame, xlator_t *this, loc_t *loc,
- gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+afr_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.xattrop.xattr = dict_ref (xattr);
- local->cont.xattrop.optype = optype;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->cont.xattrop.xattr = dict_ref(xattr);
+ local->cont.xattrop.optype = optype;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- local->transaction.wind = afr_xattrop_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_xattrop_unwind;
+ local->transaction.wind = afr_xattrop_wind;
+ local->transaction.unwind = afr_xattrop_unwind;
- loc_copy (&local->loc, loc);
- local->inode = inode_ref (loc->inode);
+ loc_copy(&local->loc, loc);
+ ret = afr_set_inode_local(this, local, loc->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_XATTROP;
+ local->op = GF_FOP_XATTROP;
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- ret = afr_transaction (transaction_frame, this, AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
- AFR_STACK_UNWIND (xattrop, frame, -1, op_errno, NULL, NULL);
- return 0;
+ AFR_STACK_UNWIND(xattrop, frame, -1, op_errno, NULL, NULL);
+ return 0;
}
int32_t
-afr_fxattrop_wind_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- dict_t *xattr, dict_t *xdata)
+afr_fxattrop_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xattr,
+ dict_t *xdata)
{
- return __afr_inode_write_cbk (frame, cookie, this, op_ret, op_errno,
- NULL, NULL, xattr, xdata);
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, NULL,
+ NULL, xattr, xdata);
}
int
-afr_fxattrop_wind (call_frame_t *frame, xlator_t *this, int subvol)
+afr_fxattrop_wind(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- STACK_WIND_COOKIE (frame, afr_fxattrop_wind_cbk, (void *) (long) subvol,
- priv->children[subvol],
- priv->children[subvol]->fops->fxattrop,
- local->fd, local->cont.xattrop.optype,
- local->cont.xattrop.xattr, local->xdata_req);
- return 0;
+ STACK_WIND_COOKIE(frame, afr_fxattrop_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->fxattrop, local->fd,
+ local->cont.xattrop.optype, local->cont.xattrop.xattr,
+ local->xdata_req);
+ return 0;
}
int
-afr_fxattrop_unwind (call_frame_t *frame, xlator_t *this)
+afr_fxattrop_unwind(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- call_frame_t *main_frame = NULL;
-
- local = frame->local;
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
- main_frame = afr_transaction_detach_fop_frame (frame);
- if (!main_frame)
- return 0;
+ local = frame->local;
- AFR_STACK_UNWIND (fxattrop, main_frame, local->op_ret, local->op_errno,
- local->xattr_rsp, local->xdata_rsp);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
+
+ AFR_STACK_UNWIND(fxattrop, main_frame, local->op_ret, local->op_errno,
+ local->xattr_rsp, local->xdata_rsp);
+ return 0;
}
int32_t
-afr_fxattrop (call_frame_t *frame, xlator_t *this, fd_t *fd,
- gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+afr_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
{
- afr_local_t *local = NULL;
- call_frame_t *transaction_frame = NULL;
- int ret = -1;
- int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int op_errno = ENOMEM;
- transaction_frame = copy_frame (frame);
- if (!transaction_frame)
- goto out;
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
- local = AFR_FRAME_INIT (transaction_frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
- local->cont.xattrop.xattr = dict_ref (xattr);
- local->cont.xattrop.optype = optype;
- if (xdata)
- local->xdata_req = dict_ref (xdata);
+ local->cont.xattrop.xattr = dict_ref(xattr);
+ local->cont.xattrop.optype = optype;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
- local->transaction.wind = afr_fxattrop_wind;
- local->transaction.fop = __afr_txn_write_fop;
- local->transaction.done = __afr_txn_write_done;
- local->transaction.unwind = afr_fxattrop_unwind;
+ local->transaction.wind = afr_fxattrop_wind;
+ local->transaction.unwind = afr_fxattrop_unwind;
- local->fd = fd_ref (fd);
- local->inode = inode_ref (fd->inode);
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
- local->op = GF_FOP_FXATTROP;
+ local->op = GF_FOP_FXATTROP;
- local->transaction.main_frame = frame;
- local->transaction.start = LLONG_MAX - 1;
- local->transaction.len = 0;
+ local->transaction.main_frame = frame;
+ local->transaction.start = LLONG_MAX - 1;
+ local->transaction.len = 0;
- ret = afr_transaction (transaction_frame, this,
- AFR_METADATA_TRANSACTION);
- if (ret < 0) {
- op_errno = -ret;
- goto out;
- }
+ ret = afr_transaction(transaction_frame, this, AFR_METADATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
- return 0;
+ return 0;
out:
- if (transaction_frame)
- AFR_STACK_DESTROY (transaction_frame);
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
+
+ AFR_STACK_UNWIND(fxattrop, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+int
+afr_fsync_unwind(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ call_frame_t *main_frame = NULL;
+
+ local = frame->local;
- AFR_STACK_UNWIND (fxattrop, frame, -1, op_errno, NULL, NULL);
+ main_frame = afr_transaction_detach_fop_frame(frame);
+ if (!main_frame)
return 0;
+
+ AFR_STACK_UNWIND(fsync, main_frame, local->op_ret, local->op_errno,
+ &local->cont.inode_wfop.prebuf,
+ &local->cont.inode_wfop.postbuf, local->xdata_rsp);
+
+ return 0;
+}
+
+int
+afr_fsync_wind_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ return __afr_inode_write_cbk(frame, cookie, this, op_ret, op_errno, prebuf,
+ postbuf, NULL, xdata);
+}
+
+int
+afr_fsync_wind(call_frame_t *frame, xlator_t *this, int subvol)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ STACK_WIND_COOKIE(frame, afr_fsync_wind_cbk, (void *)(long)subvol,
+ priv->children[subvol],
+ priv->children[subvol]->fops->fsync, local->fd,
+ local->cont.fsync.datasync, local->xdata_req);
+ return 0;
+}
+
+int
+afr_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ call_frame_t *transaction_frame = NULL;
+ int ret = -1;
+ int32_t op_errno = ENOMEM;
+ int8_t last_fsync = 0;
+
+ AFR_ERROR_OUT_IF_FDCTX_INVALID(fd, this, op_errno, out);
+ transaction_frame = copy_frame(frame);
+ if (!transaction_frame)
+ goto out;
+
+ local = AFR_FRAME_INIT(transaction_frame, op_errno);
+ if (!local)
+ goto out;
+
+ if (xdata) {
+ local->xdata_req = dict_copy_with_ref(xdata, NULL);
+ if (dict_get_int8(xdata, "last-fsync", &last_fsync) == 0) {
+ if (last_fsync) {
+ local->transaction.disable_delayed_post_op = _gf_true;
+ }
+ }
+ } else {
+ local->xdata_req = dict_new();
+ }
+
+ if (!local->xdata_req)
+ goto out;
+
+ local->fd = fd_ref(fd);
+ ret = afr_set_inode_local(this, local, fd->inode);
+ if (ret)
+ goto out;
+
+ local->op = GF_FOP_FSYNC;
+ local->cont.fsync.datasync = datasync;
+
+ if (afr_fd_has_witnessed_unstable_write(this, fd->inode)) {
+ /* don't care. we only wanted to CLEAR the bit */
+ }
+
+ local->transaction.wind = afr_fsync_wind;
+ local->transaction.unwind = afr_fsync_unwind;
+
+ local->transaction.main_frame = frame;
+
+ ret = afr_transaction(transaction_frame, this, AFR_DATA_TRANSACTION);
+ if (ret < 0) {
+ op_errno = -ret;
+ goto out;
+ }
+
+ return 0;
+out:
+ if (transaction_frame)
+ AFR_STACK_DESTROY(transaction_frame);
+
+ AFR_STACK_UNWIND(fsync, frame, -1, op_errno, NULL, NULL, NULL);
+
+ return 0;
}
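
The afr_fsync added above routes fsync through the regular AFR data-transaction machinery and honours an optional `last-fsync` hint in xdata: when that int8 key is non-zero, delayed post-op is disabled for the transaction. A minimal sketch of how a caller could build that hint with the stock libglusterfs dict API (`dict_new`, `dict_set_int8`, `dict_unref`); the helper name is hypothetical and the surrounding fop plumbing is omitted.

```
/* Sketch only: build an xdata dict that asks AFR to skip the delayed
 * post-op for this fsync. "build_last_fsync_hint" is a hypothetical
 * helper; error handling is trimmed to the essentials. */
#include <glusterfs/dict.h>

static dict_t *
build_last_fsync_hint(void)
{
        dict_t *xdata = dict_new();

        if (!xdata)
                return NULL;

        if (dict_set_int8(xdata, "last-fsync", 1)) {
                dict_unref(xdata);
                return NULL;
        }

        return xdata; /* pass this as the xdata argument of the fsync fop */
}
```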
diff --git a/xlators/cluster/afr/src/afr-inode-write.h b/xlators/cluster/afr/src/afr-inode-write.h
index e174cc2d610..a787069b7a1 100644
--- a/xlators/cluster/afr/src/afr-inode-write.h
+++ b/xlators/cluster/afr/src/afr-inode-write.h
@@ -12,79 +12,83 @@
#define __INODE_WRITE_H__
int32_t
-afr_chmod (call_frame_t *frame, xlator_t *this,
- loc_t *loc, mode_t mode, dict_t *xdata);
+afr_chmod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dict_t *xdata);
int32_t
-afr_chown (call_frame_t *frame, xlator_t *this,
- loc_t *loc, uid_t uid, gid_t gid, dict_t *xdata);
+afr_chown(call_frame_t *frame, xlator_t *this, loc_t *loc, uid_t uid, gid_t gid,
+ dict_t *xdata);
int
-afr_fchown (call_frame_t *frame, xlator_t *this,
- fd_t *fd, uid_t uid, gid_t gid, dict_t *xdata);
+afr_fchown(call_frame_t *frame, xlator_t *this, fd_t *fd, uid_t uid, gid_t gid,
+ dict_t *xdata);
int32_t
-afr_fchmod (call_frame_t *frame, xlator_t *this,
- fd_t *fd, mode_t mode, dict_t *xdata);
+afr_fchmod(call_frame_t *frame, xlator_t *this, fd_t *fd, mode_t mode,
+ dict_t *xdata);
int32_t
-afr_writev (call_frame_t *frame, xlator_t *this, fd_t *fd,
- struct iovec *vector, int32_t count, off_t offset,
- uint32_t flags, struct iobref *iobref, dict_t *xdata);
+afr_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *vector,
+ int32_t count, off_t offset, uint32_t flags, struct iobref *iobref,
+ dict_t *xdata);
int32_t
-afr_truncate (call_frame_t *frame, xlator_t *this,
- loc_t *loc, off_t offset, dict_t *xdata);
+afr_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata);
int32_t
-afr_ftruncate (call_frame_t *frame, xlator_t *this,
- fd_t *fd, off_t offset, dict_t *xdata);
+afr_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata);
int32_t
-afr_utimens (call_frame_t *frame, xlator_t *this,
- loc_t *loc, struct timespec tv[2], dict_t *xdata);
+afr_utimens(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct timespec tv[2], dict_t *xdata);
int
-afr_setattr (call_frame_t *frame, xlator_t *this,
- loc_t *loc, struct iatt *buf, int32_t valid, dict_t *xdata);
+afr_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *buf,
+ int32_t valid, dict_t *xdata);
int
-afr_fsetattr (call_frame_t *frame, xlator_t *this,
- fd_t *fd, struct iatt *buf, int32_t valid, dict_t *xdata);
+afr_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iatt *buf,
+ int32_t valid, dict_t *xdata);
int32_t
-afr_setxattr (call_frame_t *frame, xlator_t *this,
- loc_t *loc, dict_t *dict, int32_t flags, dict_t *xdata);
+afr_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata);
int32_t
-afr_fsetxattr (call_frame_t *frame, xlator_t *this,
- fd_t *fd, dict_t *dict, int32_t flags, dict_t *xdata);
+afr_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata);
int32_t
-afr_removexattr (call_frame_t *frame, xlator_t *this,
- loc_t *loc, const char *name, dict_t *xdata);
+afr_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata);
int32_t
-afr_fremovexattr (call_frame_t *frame, xlator_t *this,
- fd_t *fd, const char *name, dict_t *xdata);
+afr_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata);
int
-afr_discard (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
- size_t len, dict_t *xdata);
+afr_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata);
int
-afr_fallocate (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
- off_t offset, size_t len, dict_t *xdata);
+afr_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
+ off_t offset, size_t len, dict_t *xdata);
int
afr_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
off_t len, dict_t *xdata);
int32_t
-afr_xattrop (call_frame_t *frame, xlator_t *this, loc_t *loc,
- gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata);
+afr_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata);
int32_t
-afr_fxattrop (call_frame_t *frame, xlator_t *this, fd_t *fd,
- gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata);
+afr_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata);
+
+int
+afr_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata);
#endif /* __INODE_WRITE_H__ */
diff --git a/xlators/cluster/afr/src/afr-lk-common.c b/xlators/cluster/afr/src/afr-lk-common.c
index 53bb7920089..bc8eabe0f43 100644
--- a/xlators/cluster/afr/src/afr-lk-common.c
+++ b/xlators/cluster/afr/src/afr-lk-common.c
@@ -8,9 +8,9 @@
cases as published by the Free Software Foundation.
*/
-#include "dict.h"
-#include "byte-order.h"
-#include "common-utils.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/common-utils.h>
#include "afr.h"
#include "afr-transaction.h"
@@ -18,1669 +18,774 @@
#include <signal.h>
-
-#define LOCKED_NO 0x0 /* no lock held */
-#define LOCKED_YES 0x1 /* for DATA, METADATA, ENTRY and higher_path */
-#define LOCKED_LOWER 0x2 /* for lower path */
-
-#define AFR_TRACE_INODELK_IN(frame, this, params ...) \
- do { \
- afr_private_t *_priv = this->private; \
- if (!_priv->inodelk_trace) \
- break; \
- afr_trace_inodelk_in (frame, this, params); \
- } while (0);
-
-#define AFR_TRACE_INODELK_OUT(frame, this, params ...) \
- do { \
- afr_private_t *_priv = this->private; \
- if (!_priv->inodelk_trace) \
- break; \
- afr_trace_inodelk_out (frame, this, params); \
- } while (0);
-
-#define AFR_TRACE_ENTRYLK_IN(frame, this, params ...) \
- do { \
- afr_private_t *_priv = this->private; \
- if (!_priv->entrylk_trace) \
- break; \
- afr_trace_entrylk_in (frame, this, params); \
- } while (0);
-
-#define AFR_TRACE_ENTRYLK_OUT(frame, this, params ...) \
- do { \
- afr_private_t *_priv = this->private; \
- if (!_priv->entrylk_trace) \
- break; \
- afr_trace_entrylk_out (frame, this, params); \
- } while (0);
-
-int
-afr_entry_lockee_cmp (const void *l1, const void *l2)
-{
- const afr_entry_lockee_t *r1 = l1;
- const afr_entry_lockee_t *r2 = l2;
- int ret = 0;
- uuid_t gfid1 = {0};
- uuid_t gfid2 = {0};
-
- loc_gfid ((loc_t*)&r1->loc, gfid1);
- loc_gfid ((loc_t*)&r2->loc, gfid2);
- ret = gf_uuid_compare (gfid1, gfid2);
- /*Entrylks with NULL basename are the 'smallest'*/
- if (ret == 0) {
- if (!r1->basename)
- return -1;
- if (!r2->basename)
- return 1;
- ret = strcmp (r1->basename, r2->basename);
- }
-
- if (ret <= 0)
- return -1;
- else
- return 1;
-}
-
-int afr_lock_blocking (call_frame_t *frame, xlator_t *this, int child_index);
-
-static int
-afr_copy_locked_nodes (call_frame_t *frame, xlator_t *this);
-
-static uint64_t afr_lock_number = 1;
-
-static uint64_t
-get_afr_lock_number ()
-{
- return (++afr_lock_number);
-}
-
-int
-afr_set_lock_number (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- int_lock->lock_number = get_afr_lock_number ();
-
- return 0;
-}
+#define LOCKED_NO 0x0 /* no lock held */
+#define LOCKED_YES 0x1 /* for DATA, METADATA, ENTRY and higher_path */
+#define LOCKED_LOWER 0x2 /* for lower path */
void
-afr_set_lk_owner (call_frame_t *frame, xlator_t *this, void *lk_owner)
-{
- gf_msg_trace (this->name, 0,
- "Setting lk-owner=%llu",
- (unsigned long long) (unsigned long)lk_owner);
-
- set_lk_owner_from_ptr (&frame->root->lk_owner, lk_owner);
-}
-
-static int
-is_afr_lock_selfheal (afr_local_t *local)
+afr_lockee_cleanup(afr_lockee_t *lockee)
{
- afr_internal_lock_t *int_lock = NULL;
- int ret = -1;
-
- int_lock = &local->internal_lock;
-
- switch (int_lock->selfheal_lk_type) {
- case AFR_DATA_SELF_HEAL_LK:
- case AFR_METADATA_SELF_HEAL_LK:
- ret = 1;
- break;
- case AFR_ENTRY_SELF_HEAL_LK:
- ret = 0;
- break;
- }
+ if (lockee->fd) {
+ fd_unref(lockee->fd);
+ lockee->fd = NULL;
+ } else {
+ loc_wipe(&lockee->loc);
+ }
- return ret;
+ GF_FREE(lockee->basename);
+ lockee->basename = NULL;
+ GF_FREE(lockee->locked_nodes);
+ lockee->locked_nodes = NULL;
-}
-
-int32_t
-internal_lock_count (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int32_t call_count = 0;
- int i = 0;
-
- local = frame->local;
- priv = this->private;
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i])
- ++call_count;
- }
-
- return call_count;
-}
-
-static void
-afr_print_inodelk (char *str, int size, int cmd,
- struct gf_flock *flock, gf_lkowner_t *owner)
-{
- char *cmd_str = NULL;
- char *type_str = NULL;
-
- switch (cmd) {
-#if F_GETLK != F_GETLK64
- case F_GETLK64:
-#endif
- case F_GETLK:
- cmd_str = "GETLK";
- break;
-
-#if F_SETLK != F_SETLK64
- case F_SETLK64:
-#endif
- case F_SETLK:
- cmd_str = "SETLK";
- break;
-
-#if F_SETLKW != F_SETLKW64
- case F_SETLKW64:
-#endif
- case F_SETLKW:
- cmd_str = "SETLKW";
- break;
-
- default:
- cmd_str = "<null>";
- break;
- }
-
- switch (flock->l_type) {
- case F_RDLCK:
- type_str = "READ";
- break;
- case F_WRLCK:
- type_str = "WRITE";
- break;
- case F_UNLCK:
- type_str = "UNLOCK";
- break;
- default:
- type_str = "UNKNOWN";
- break;
- }
-
- snprintf (str, size, "lock=INODELK, cmd=%s, type=%s, "
- "start=%llu, len=%llu, pid=%llu, lk-owner=%s",
- cmd_str, type_str, (unsigned long long) flock->l_start,
- (unsigned long long) flock->l_len,
- (unsigned long long) flock->l_pid,
- lkowner_utoa (owner));
-
-}
-
-static void
-afr_print_lockee (char *str, int size, loc_t *loc, fd_t *fd,
- int child_index)
-{
- snprintf (str, size, "path=%s, fd=%p, child=%d",
- loc->path ? loc->path : "<nul>",
- fd ? fd : NULL,
- child_index);
+ return;
}
void
-afr_print_entrylk (char *str, int size, const char *basename,
- gf_lkowner_t *owner)
-{
- snprintf (str, size, "Basename=%s, lk-owner=%s",
- basename ? basename : "<nul>",
- lkowner_utoa (owner));
-}
-
-static void
-afr_print_verdict (int op_ret, int op_errno, char *str)
+afr_lockees_cleanup(afr_internal_lock_t *int_lock)
{
- if (op_ret < 0) {
- if (op_errno == EAGAIN)
- strcpy (str, "EAGAIN");
- else
- strcpy (str, "FAILED");
- }
- else
- strcpy (str, "GRANTED");
-}
+ int i = 0;
-static void
-afr_set_lock_call_type (afr_lock_call_type_t lock_call_type,
- char *lock_call_type_str,
- afr_internal_lock_t *int_lock)
-{
- switch (lock_call_type) {
- case AFR_INODELK_TRANSACTION:
- if (int_lock->transaction_lk_type == AFR_TRANSACTION_LK)
- strcpy (lock_call_type_str, "AFR_INODELK_TRANSACTION");
- else
- strcpy (lock_call_type_str, "AFR_INODELK_SELFHEAL");
- break;
- case AFR_INODELK_NB_TRANSACTION:
- if (int_lock->transaction_lk_type == AFR_TRANSACTION_LK)
- strcpy (lock_call_type_str, "AFR_INODELK_NB_TRANSACTION");
- else
- strcpy (lock_call_type_str, "AFR_INODELK_NB_SELFHEAL");
- break;
- case AFR_ENTRYLK_TRANSACTION:
- if (int_lock->transaction_lk_type == AFR_TRANSACTION_LK)
- strcpy (lock_call_type_str, "AFR_ENTRYLK_TRANSACTION");
- else
- strcpy (lock_call_type_str, "AFR_ENTRYLK_SELFHEAL");
- break;
- case AFR_ENTRYLK_NB_TRANSACTION:
- if (int_lock->transaction_lk_type == AFR_TRANSACTION_LK)
- strcpy (lock_call_type_str, "AFR_ENTRYLK_NB_TRANSACTION");
- else
- strcpy (lock_call_type_str, "AFR_ENTRYLK_NB_SELFHEAL");
- break;
- default:
- strcpy (lock_call_type_str, "UNKNOWN");
- break;
- }
+ for (i = 0; i < int_lock->lockee_count; i++) {
+ afr_lockee_cleanup(&int_lock->lockee[i]);
+ }
+ return;
}
-
-static void
-afr_trace_inodelk_out (call_frame_t *frame, xlator_t *this,
- afr_lock_call_type_t lock_call_type,
- afr_lock_op_type_t lk_op_type, struct gf_flock *flock,
- int op_ret, int op_errno, int32_t child_index)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
-
- char lockee[256];
- char lock_call_type_str[256];
- char verdict[16];
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- afr_print_lockee (lockee, 256, &local->loc, local->fd, child_index);
-
- afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock);
-
- afr_print_verdict (op_ret, op_errno, verdict);
-
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO,
- "[%s %s] [%s] lk-owner=%s Lockee={%s} Number={%llu}",
- lock_call_type_str,
- lk_op_type == AFR_LOCK_OP ? "LOCK REPLY" : "UNLOCK REPLY",
- verdict, lkowner_utoa (&frame->root->lk_owner), lockee,
- (unsigned long long) int_lock->lock_number);
-
-}
-
-static void
-afr_trace_inodelk_in (call_frame_t *frame, xlator_t *this,
- afr_lock_call_type_t lock_call_type,
- afr_lock_op_type_t lk_op_type, struct gf_flock *flock,
- int32_t cmd, int32_t child_index)
-{
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
-
- char lock[256];
- char lockee[256];
- char lock_call_type_str[256];
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- afr_print_inodelk (lock, 256, cmd, flock, &frame->root->lk_owner);
- afr_print_lockee (lockee, 256, &local->loc, local->fd, child_index);
-
- afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock);
-
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO,
- "[%s %s] Lock={%s} Lockee={%s} Number={%llu}",
- lock_call_type_str,
- lk_op_type == AFR_LOCK_OP ? "LOCK REQUEST" : "UNLOCK REQUEST",
- lock, lockee,
- (unsigned long long) int_lock->lock_number);
-
+int
+afr_entry_lockee_cmp(const void *l1, const void *l2)
+{
+ const afr_lockee_t *r1 = l1;
+ const afr_lockee_t *r2 = l2;
+ int ret = 0;
+ uuid_t gfid1 = {0};
+ uuid_t gfid2 = {0};
+
+ loc_gfid((loc_t *)&r1->loc, gfid1);
+ loc_gfid((loc_t *)&r2->loc, gfid2);
+ ret = gf_uuid_compare(gfid1, gfid2);
+ /*Entrylks with NULL basename are the 'smallest'*/
+ if (ret == 0) {
+ if (!r1->basename)
+ return -1;
+ if (!r2->basename)
+ return 1;
+ ret = strcmp(r1->basename, r2->basename);
+ }
+
+ if (ret <= 0)
+ return -1;
+ else
+ return 1;
}
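
afr_entry_lockee_cmp orders entry lockees by gfid first, then by basename, with a NULL basename (a lock covering the whole directory) sorting lowest; taking multi-lockee blocking locks in one well-defined order is what keeps two racing clients from deadlocking. The stripped-down, self-contained model below mirrors that ordering with qsort; the struct, sample data and main() are illustrative only and are not part of the patch.

```
/* Simplified model of the lockee ordering: sort by gfid, then by
 * basename, with NULL basename sorting first. Mirrors the original in
 * that identical keys still compare as -1, so no two lockees ever
 * compare equal. All names here are hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
        unsigned char gfid[16];
        const char   *basename; /* NULL == lock on the whole directory */
} model_lockee_t;

static int
model_lockee_cmp(const void *a, const void *b)
{
        const model_lockee_t *l = a, *r = b;
        int ret = memcmp(l->gfid, r->gfid, sizeof(l->gfid));

        if (ret == 0) {
                if (!l->basename)
                        return -1;
                if (!r->basename)
                        return 1;
                ret = strcmp(l->basename, r->basename);
        }
        return (ret <= 0) ? -1 : 1;
}

int
main(void)
{
        model_lockee_t lockees[] = {
                { { 2 }, "b" }, { { 2 }, NULL }, { { 1 }, "a" },
        };

        qsort(lockees, 3, sizeof(lockees[0]), model_lockee_cmp);
        for (int i = 0; i < 3; i++)
                printf("gfid[0]=%u basename=%s\n",
                       (unsigned)lockees[i].gfid[0],
                       lockees[i].basename ? lockees[i].basename : "(null)");
        return 0;
}
```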
-static void
-afr_trace_entrylk_in (call_frame_t *frame, xlator_t *this,
- afr_lock_call_type_t lock_call_type,
- afr_lock_op_type_t lk_op_type, const char *basename,
- int32_t cookie)
-{
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- afr_private_t *priv = NULL;
- int child_index = 0;
- int lockee_no = 0;
-
- char lock[256];
- char lockee[256];
- char lock_call_type_str[256];
-
- local = frame->local;
- int_lock = &local->internal_lock;
- priv = this->private;
-
- if (!priv->entrylk_trace) {
- return;
- }
- lockee_no = cookie / priv->child_count;
- child_index = cookie % priv->child_count;
-
- afr_print_entrylk (lock, 256, basename, &frame->root->lk_owner);
- afr_print_lockee (lockee, 256, &int_lock->lockee[lockee_no].loc, local->fd,
- child_index);
-
- afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock);
-
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO,
- "[%s %s] Lock={%s} Lockee={%s} Number={%llu}, Cookie={%d}",
- lock_call_type_str,
- lk_op_type == AFR_LOCK_OP ? "LOCK REQUEST" : "UNLOCK REQUEST",
- lock, lockee,
- (unsigned long long) int_lock->lock_number,
- cookie);
-}
+int
+afr_lock_blocking(call_frame_t *frame, xlator_t *this, int child_index);
-static void
-afr_trace_entrylk_out (call_frame_t *frame, xlator_t *this,
- afr_lock_call_type_t lock_call_type,
- afr_lock_op_type_t lk_op_type, const char *basename,
- int op_ret, int op_errno, int32_t cookie)
+void
+afr_set_lk_owner(call_frame_t *frame, xlator_t *this, void *lk_owner)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int lockee_no = 0;
- int child_index = 0;
-
- char lock[256];
- char lockee[256];
- char lock_call_type_str[256];
- char verdict[16];
-
- local = frame->local;
- int_lock = &local->internal_lock;
- priv = this->private;
-
- if (!priv->entrylk_trace) {
- return;
- }
- lockee_no = cookie / priv->child_count;
- child_index = cookie % priv->child_count;
-
- afr_print_entrylk (lock, 256, basename, &frame->root->lk_owner);
- afr_print_lockee (lockee, 256, &int_lock->lockee[lockee_no].loc, local->fd,
- child_index);
-
- afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock);
-
- afr_print_verdict (op_ret, op_errno, verdict);
-
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO,
- "[%s %s] [%s] Lock={%s} Lockee={%s} Number={%llu} Cookie={%d}",
- lock_call_type_str,
- lk_op_type == AFR_LOCK_OP ? "LOCK REPLY" : "UNLOCK REPLY",
- verdict,
- lock, lockee,
- (unsigned long long) int_lock->lock_number,
- cookie);
+ gf_msg_trace(this->name, 0, "Setting lk-owner=%llu",
+ (unsigned long long)(unsigned long)lk_owner);
+ set_lk_owner_from_ptr(&frame->root->lk_owner, lk_owner);
}
-static int
-transaction_lk_op (afr_local_t *local)
+int32_t
+internal_lock_count(call_frame_t *frame, xlator_t *this)
{
- afr_internal_lock_t *int_lock = NULL;
- int ret = -1;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int32_t call_count = 0;
+ int i = 0;
- int_lock = &local->internal_lock;
+ local = frame->local;
+ priv = this->private;
- if (int_lock->transaction_lk_type == AFR_TRANSACTION_LK) {
- gf_msg_debug (THIS->name, 0,
- "lk op is for a transaction");
- ret = 1;
- }
- else if (int_lock->transaction_lk_type == AFR_SELFHEAL_LK) {
- gf_msg_debug (THIS->name, 0,
- "lk op is for a self heal");
-
- ret = 0;
- }
-
- if (ret == -1)
- gf_msg_debug (THIS->name, 0,
- "lk op is not set");
-
- return ret;
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->child_up[i])
+ ++call_count;
+ }
+ return call_count;
}
-static int
-is_afr_lock_transaction (afr_local_t *local)
+int
+afr_add_entry_lockee(afr_local_t *local, loc_t *loc, char *basename,
+ int child_count)
{
- int ret = 0;
+ int ret = -ENOMEM;
+ afr_internal_lock_t *int_lock = &local->internal_lock;
+ afr_lockee_t *lockee = &int_lock->lockee[int_lock->lockee_count];
- switch (local->transaction.type) {
- case AFR_DATA_TRANSACTION:
- case AFR_METADATA_TRANSACTION:
- ret = 1;
- break;
+ GF_ASSERT(int_lock->lockee_count < AFR_LOCKEE_COUNT_MAX);
+ loc_copy(&lockee->loc, loc);
+ lockee->basename = (basename) ? gf_strdup(basename) : NULL;
+ if (basename && !lockee->basename)
+ goto out;
- case AFR_ENTRY_RENAME_TRANSACTION:
- case AFR_ENTRY_TRANSACTION:
- ret = 0;
- break;
+ lockee->locked_count = 0;
+ lockee->locked_nodes = GF_CALLOC(child_count, sizeof(*lockee->locked_nodes),
+ gf_afr_mt_afr_node_character);
- }
+ if (!lockee->locked_nodes)
+ goto out;
- return ret;
+ ret = 0;
+ int_lock->lockee_count++;
+out:
+ if (ret) {
+ afr_lockee_cleanup(lockee);
+ }
+ return ret;
}
int
-afr_init_entry_lockee (afr_entry_lockee_t *lockee, afr_local_t *local,
- loc_t *loc, char *basename, int child_count)
+afr_add_inode_lockee(afr_local_t *local, int child_count)
{
- int ret = -1;
+ int ret = -ENOMEM;
+ afr_internal_lock_t *int_lock = &local->internal_lock;
+ afr_lockee_t *lockee = &int_lock->lockee[int_lock->lockee_count];
- loc_copy (&lockee->loc, loc);
- lockee->basename = (basename)? gf_strdup (basename): NULL;
- if (basename && !lockee->basename)
- goto out;
+ if (local->fd) {
+ lockee->fd = fd_ref(local->fd);
+ } else {
+ loc_copy(&lockee->loc, &local->loc);
+ }
- lockee->locked_count = 0;
- lockee->locked_nodes = GF_CALLOC (child_count,
- sizeof (*lockee->locked_nodes),
- gf_afr_mt_afr_node_character);
+ lockee->locked_count = 0;
+ lockee->locked_nodes = GF_CALLOC(child_count, sizeof(*lockee->locked_nodes),
+ gf_afr_mt_afr_node_character);
- if (!lockee->locked_nodes)
- goto out;
+ if (!lockee->locked_nodes)
+ goto out;
- ret = 0;
+ ret = 0;
+ int_lock->lockee_count++;
out:
- return ret;
-
-}
-
-void
-afr_entry_lockee_cleanup (afr_internal_lock_t *int_lock)
-{
- int i = 0;
-
- for (i = 0; i < int_lock->lockee_count; i++) {
- loc_wipe (&int_lock->lockee[i].loc);
- if (int_lock->lockee[i].basename)
- GF_FREE (int_lock->lockee[i].basename);
- if (int_lock->lockee[i].locked_nodes)
- GF_FREE (int_lock->lockee[i].locked_nodes);
- }
-
- return;
-}
-
-static int
-initialize_entrylk_variables (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- afr_private_t *priv = NULL;
-
- int i = 0;
-
- priv = this->private;
- local = frame->local;
- int_lock = &local->internal_lock;
-
- int_lock->entrylk_lock_count = 0;
- int_lock->lock_op_ret = -1;
- int_lock->lock_op_errno = 0;
-
- for (i = 0; i < AFR_LOCKEE_COUNT_MAX; i++) {
- if (!int_lock->lockee[i].locked_nodes)
- break;
- int_lock->lockee[i].locked_count = 0;
- memset (int_lock->lockee[i].locked_nodes, 0,
- sizeof (*int_lock->lockee[i].locked_nodes) *
- priv->child_count);
- }
-
- return 0;
+ if (ret) {
+ afr_lockee_cleanup(lockee);
+ }
+ return ret;
}
static int
-initialize_inodelk_variables (call_frame_t *frame, xlator_t *this)
+initialize_internal_lock_variables(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- afr_private_t *priv = NULL;
- afr_inodelk_t *inodelk = NULL;
+ afr_local_t *local = NULL;
+ afr_internal_lock_t *int_lock = NULL;
+ afr_private_t *priv = NULL;
- priv = this->private;
- local = frame->local;
- int_lock = &local->internal_lock;
+ int i = 0;
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
+ priv = this->private;
+ local = frame->local;
+ int_lock = &local->internal_lock;
- inodelk->lock_count = 0;
- int_lock->lk_attempted_count = 0;
- int_lock->lock_op_ret = -1;
- int_lock->lock_op_errno = 0;
+ int_lock->lock_count = 0;
+ int_lock->lock_op_ret = -1;
+ int_lock->lock_op_errno = 0;
+ int_lock->lk_attempted_count = 0;
- memset (inodelk->locked_nodes, 0,
- sizeof (*inodelk->locked_nodes) * priv->child_count);
- memset (int_lock->locked_nodes, 0,
- sizeof (*int_lock->locked_nodes) * priv->child_count);
+ for (i = 0; i < AFR_LOCKEE_COUNT_MAX; i++) {
+ if (!int_lock->lockee[i].locked_nodes)
+ break;
+ int_lock->lockee[i].locked_count = 0;
+ memset(int_lock->lockee[i].locked_nodes, 0,
+ sizeof(*int_lock->lockee[i].locked_nodes) * priv->child_count);
+ }
- return 0;
+ return 0;
}
int
-afr_lockee_locked_nodes_count (afr_internal_lock_t *int_lock)
+afr_lockee_locked_nodes_count(afr_internal_lock_t *int_lock)
{
- int call_count = 0;
- int i = 0;
+ int call_count = 0;
+ int i = 0;
- for (i = 0; i < int_lock->lockee_count; i++)
- call_count += int_lock->lockee[i].locked_count;
+ for (i = 0; i < int_lock->lockee_count; i++)
+ call_count += int_lock->lockee[i].locked_count;
- return call_count;
+ return call_count;
}
int
-afr_locked_nodes_count (unsigned char *locked_nodes, int child_count)
+afr_locked_nodes_count(unsigned char *locked_nodes, int child_count)
{
- int i = 0;
- int call_count = 0;
+ int i = 0;
+ int call_count = 0;
- for (i = 0; i < child_count; i++) {
- if (locked_nodes[i] & LOCKED_YES)
- call_count++;
- }
+ for (i = 0; i < child_count; i++) {
+ if (locked_nodes[i] & LOCKED_YES)
+ call_count++;
+ }
- return call_count;
+ return call_count;
}
-/* FIXME: What if UNLOCK fails */
-static int32_t
-afr_unlock_common_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+static void
+afr_log_locks_failure(call_frame_t *frame, char *where, char *what,
+ int op_errno)
{
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- int call_count = 0;
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- LOCK (&frame->lock);
- {
- call_count = --int_lock->lk_call_count;
- }
- UNLOCK (&frame->lock);
-
- if (call_count == 0) {
- gf_msg_trace (this->name, 0,
- "All internal locks unlocked");
+ xlator_t *this = frame->this;
+ gf_lkowner_t *lk_owner = &frame->root->lk_owner;
+ afr_local_t *local = frame->local;
+ const char *fop = NULL;
+ char *gfid = NULL;
+ const char *name = NULL;
- int_lock->lock_cbk (frame, this);
- }
+ fop = gf_fop_list[local->op];
- return 0;
+ switch (local->transaction.type) {
+ case AFR_ENTRY_RENAME_TRANSACTION:
+ case AFR_ENTRY_TRANSACTION:
+ switch (local->op) {
+ case GF_FOP_LINK:
+ gfid = uuid_utoa(local->newloc.pargfid);
+ name = local->newloc.name;
+ break;
+ default:
+ gfid = uuid_utoa(local->loc.pargfid);
+ name = local->loc.name;
+ break;
+ }
+ gf_msg(this->name, GF_LOG_WARNING, op_errno,
+ AFR_MSG_INTERNAL_LKS_FAILED,
+ "Unable to do entry %s with lk-owner:%s on %s "
+ "while attempting %s on {pgfid:%s, name:%s}.",
+ what, lkowner_utoa(lk_owner), where, fop, gfid, name);
+ break;
+ case AFR_DATA_TRANSACTION:
+ case AFR_METADATA_TRANSACTION:
+ gfid = uuid_utoa(local->inode->gfid);
+ gf_msg(this->name, GF_LOG_WARNING, op_errno,
+ AFR_MSG_INTERNAL_LKS_FAILED,
+ "Unable to do inode %s with lk-owner:%s on %s "
+ "while attempting %s on gfid:%s.",
+ what, lkowner_utoa(lk_owner), where, fop, gfid);
+ break;
+ }
}
static int32_t
-afr_unlock_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
+afr_unlock_common_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- afr_inodelk_t *inodelk = NULL;
- int32_t child_index = (long)cookie;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- AFR_TRACE_INODELK_OUT (frame, this, AFR_INODELK_TRANSACTION,
- AFR_UNLOCK_OP, NULL, op_ret,
- op_errno, child_index);
-
- priv = this->private;
-
- if (op_ret < 0 && op_errno != ENOTCONN && op_errno != EBADFD) {
- gf_msg (this->name, GF_LOG_ERROR, op_errno,
- AFR_MSG_INODE_UNLOCK_FAIL,
- "path=%s gfid=%s: unlock failed on subvolume %s "
- "with lock owner %s", local->loc.path,
- loc_gfid_utoa (&(local->loc)),
- priv->children[child_index]->name,
- lkowner_utoa (&frame->root->lk_owner));
- }
-
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_internal_lock_t *int_lock = NULL;
+ int lockee_num = 0;
+ int call_count = 0;
+ int child_index = 0;
+ int ret = 0;
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
- inodelk->locked_nodes[child_index] &= LOCKED_NO;
- if (local->transaction.eager_lock)
- local->transaction.eager_lock[child_index] = 0;
+ local = frame->local;
+ int_lock = &local->internal_lock;
+ priv = this->private;
+ lockee_num = (int)((long)cookie) / priv->child_count;
+ child_index = (int)((long)cookie) % priv->child_count;
- afr_unlock_common_cbk (frame, cookie, this, op_ret, op_errno, xdata);
+ if (op_ret < 0 && op_errno != ENOTCONN && op_errno != EBADFD) {
+ afr_log_locks_failure(frame, priv->children[child_index]->name,
+ "unlock", op_errno);
+ }
- return 0;
-
-}
+ int_lock->lockee[lockee_num].locked_nodes[child_index] &= LOCKED_NO;
+ if (local->transaction.type == AFR_DATA_TRANSACTION && op_ret != 1)
+ ret = afr_write_subvol_reset(frame, this);
-static int
-afr_unlock_inodelk (call_frame_t *frame, xlator_t *this)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_inodelk_t *inodelk = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- struct gf_flock flock = {0,};
- struct gf_flock full_flock = {0,};
- struct gf_flock *flock_use = NULL;
- int call_count = 0;
- int i = 0;
- int piggyback = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
+ LOCK(&frame->lock);
+ {
+ call_count = --int_lock->lk_call_count;
+ }
+ UNLOCK(&frame->lock);
+ if (call_count == 0) {
+ int_lock->lock_cbk(frame, this);
+ }
- local = frame->local;
- int_lock = &local->internal_lock;
- priv = this->private;
-
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
-
- flock.l_start = inodelk->flock.l_start;
- flock.l_len = inodelk->flock.l_len;
- flock.l_type = F_UNLCK;
-
- full_flock.l_type = F_UNLCK;
- call_count = afr_locked_nodes_count (inodelk->locked_nodes,
- priv->child_count);
-
- int_lock->lk_call_count = call_count;
-
- if (!call_count) {
- gf_msg_trace (this->name, 0,
- "No internal locks unlocked");
-
- int_lock->lock_cbk (frame, this);
- goto out;
- }
-
- if (local->fd)
- fd_ctx = afr_fd_ctx_get (local->fd, this);
-
- for (i = 0; i < priv->child_count; i++) {
- if ((inodelk->locked_nodes[i] & LOCKED_YES) != LOCKED_YES)
- continue;
-
- if (local->fd) {
- flock_use = &flock;
- if (!local->transaction.eager_lock[i]) {
- goto wind;
- }
-
- piggyback = 0;
-
- LOCK (&local->fd->lock);
- {
- if (fd_ctx->lock_piggyback[i]) {
- fd_ctx->lock_piggyback[i]--;
- piggyback = 1;
- } else {
- fd_ctx->lock_acquired[i]--;
- }
- }
- UNLOCK (&local->fd->lock);
-
- if (piggyback) {
- afr_unlock_inodelk_cbk (frame, (void *) (long) i,
- this, 1, 0, NULL);
- if (!--call_count)
- break;
- continue;
- }
-
- flock_use = &full_flock;
- wind:
- AFR_TRACE_INODELK_IN (frame, this,
- AFR_INODELK_TRANSACTION,
- AFR_UNLOCK_OP, flock_use, F_SETLK,
- i);
-
- STACK_WIND_COOKIE (frame, afr_unlock_inodelk_cbk,
- (void *) (long)i,
- priv->children[i],
- priv->children[i]->fops->finodelk,
- int_lock->domain, local->fd,
- F_SETLK, flock_use, NULL);
-
- if (!--call_count)
- break;
-
- } else {
- AFR_TRACE_INODELK_IN (frame, this,
- AFR_INODELK_TRANSACTION,
- AFR_UNLOCK_OP, &flock, F_SETLK, i);
-
- STACK_WIND_COOKIE (frame, afr_unlock_inodelk_cbk,
- (void *) (long)i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- int_lock->domain, &local->loc,
- F_SETLK, &flock, NULL);
-
- if (!--call_count)
- break;
- }
- }
-out:
- return 0;
+ return ret;
}
-static int32_t
-afr_unlock_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- afr_internal_lock_t *int_lock = NULL;
- int32_t child_index = 0;
- int lockee_no = 0;
-
- priv = this->private;
- lockee_no = (int)((long) cookie) / priv->child_count;
- child_index = (int) ((long) cookie) % priv->child_count;
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- AFR_TRACE_ENTRYLK_OUT (frame, this, AFR_ENTRYLK_TRANSACTION,
- AFR_UNLOCK_OP,
- int_lock->lockee[lockee_no].basename, op_ret,
- op_errno, (int) ((long)cookie));
-
- if (op_ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, op_errno,
- AFR_MSG_ENTRY_UNLOCK_FAIL,
- "%s: unlock failed on %s", local->loc.path,
- priv->children[child_index]->name);
- }
-
- int_lock->lockee[lockee_no].locked_nodes[child_index] &= LOCKED_NO;
- afr_unlock_common_cbk (frame, cookie, this, op_ret, op_errno, NULL);
+void
+afr_internal_lock_wind(call_frame_t *frame,
+ int32_t (*cbk)(call_frame_t *, void *, xlator_t *,
+ int32_t, int32_t, dict_t *),
+ void *cookie, int child, int lockee_num,
+ gf_boolean_t blocking, gf_boolean_t unlock)
+{
+ afr_local_t *local = frame->local;
+ xlator_t *this = frame->this;
+ afr_private_t *priv = this->private;
+ afr_internal_lock_t *int_lock = &local->internal_lock;
+ entrylk_cmd cmd = ENTRYLK_LOCK_NB;
+ int32_t cmd1 = F_SETLK;
+ struct gf_flock flock = {
+ 0,
+ };
+
+ switch (local->transaction.type) {
+ case AFR_ENTRY_TRANSACTION:
+ case AFR_ENTRY_RENAME_TRANSACTION:
+ if (unlock) {
+ cmd = ENTRYLK_UNLOCK;
+ } else if (blocking) { /*Doesn't make sense to have blocking
+ unlock*/
+ cmd = ENTRYLK_LOCK;
+ }
+
+ if (local->fd) {
+ STACK_WIND_COOKIE(frame, cbk, cookie, priv->children[child],
+ priv->children[child]->fops->fentrylk,
+ int_lock->domain,
+ int_lock->lockee[lockee_num].fd,
+ int_lock->lockee[lockee_num].basename, cmd,
+ ENTRYLK_WRLCK, NULL);
+ } else {
+ STACK_WIND_COOKIE(frame, cbk, cookie, priv->children[child],
+ priv->children[child]->fops->entrylk,
+ int_lock->domain,
+ &int_lock->lockee[lockee_num].loc,
+ int_lock->lockee[lockee_num].basename, cmd,
+ ENTRYLK_WRLCK, NULL);
+ }
+ break;
- return 0;
+ case AFR_DATA_TRANSACTION:
+ case AFR_METADATA_TRANSACTION:
+ flock = int_lock->lockee[lockee_num].flock;
+ if (unlock) {
+ flock.l_type = F_UNLCK;
+ } else if (blocking) { /*Doesn't make sense to have blocking
+ unlock*/
+ cmd1 = F_SETLKW;
+ }
+
+ if (local->fd) {
+ STACK_WIND_COOKIE(
+ frame, cbk, cookie, priv->children[child],
+ priv->children[child]->fops->finodelk, int_lock->domain,
+ int_lock->lockee[lockee_num].fd, cmd1, &flock, NULL);
+ } else {
+ STACK_WIND_COOKIE(
+ frame, cbk, cookie, priv->children[child],
+ priv->children[child]->fops->inodelk, int_lock->domain,
+ &int_lock->lockee[lockee_num].loc, cmd1, &flock, NULL);
+ }
+ break;
+ }
}
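
afr_internal_lock_wind collapses the previous four wind paths (entrylk/inodelk, each with fd and loc variants) into one helper; the only per-call decisions are which fop to wind and which lock command to use. The hypothetical helpers below simply restate that decision table for reference and are not part of the patch.

```
/* Decision table of afr_internal_lock_wind, restated as two small
 * helpers. Hypothetical names, shown only for clarity. */
#include <fcntl.h>
#include <stdio.h>

static const char *
entrylk_cmd_for(int unlock, int blocking)
{
        if (unlock)
                return "ENTRYLK_UNLOCK";
        return blocking ? "ENTRYLK_LOCK" : "ENTRYLK_LOCK_NB";
}

static int
inodelk_cmd_for(int unlock, int blocking)
{
        if (unlock)
                return F_SETLK; /* the flock's l_type is flipped to F_UNLCK */
        return blocking ? F_SETLKW : F_SETLK;
}

int
main(void)
{
        printf("blocking lock : %s / %d\n", entrylk_cmd_for(0, 1),
               inodelk_cmd_for(0, 1));
        printf("nb lock       : %s / %d\n", entrylk_cmd_for(0, 0),
               inodelk_cmd_for(0, 0));
        printf("unlock        : %s / %d\n", entrylk_cmd_for(1, 0),
               inodelk_cmd_for(1, 0));
        return 0;
}
```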
static int
-afr_unlock_entrylk (call_frame_t *frame, xlator_t *this)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = 0;
- int index = 0;
- int lockee_no = 0;
- int copies = 0;
- int i = -1;
-
- local = frame->local;
- int_lock = &local->internal_lock;
- priv = this->private;
- copies = priv->child_count;
-
- call_count = afr_lockee_locked_nodes_count (int_lock);
-
- int_lock->lk_call_count = call_count;
-
- if (!call_count){
- gf_msg_trace (this->name, 0,
- "No internal locks unlocked");
- int_lock->lock_cbk (frame, this);
- goto out;
- }
-
- for (i = 0; i < int_lock->lockee_count * priv->child_count; i++) {
- lockee_no = i / copies;
- index = i % copies;
- if (int_lock->lockee[lockee_no].locked_nodes[index] & LOCKED_YES) {
- AFR_TRACE_ENTRYLK_IN (frame, this, AFR_ENTRYLK_NB_TRANSACTION,
- AFR_UNLOCK_OP,
- int_lock->lockee[lockee_no].basename,
- i);
-
- STACK_WIND_COOKIE (frame, afr_unlock_entrylk_cbk,
- (void *) (long) i,
- priv->children[index],
- priv->children[index]->fops->entrylk,
- int_lock->domain,
- &int_lock->lockee[lockee_no].loc,
- int_lock->lockee[lockee_no].basename,
- ENTRYLK_UNLOCK, ENTRYLK_WRLCK, NULL);
-
- if (!--call_count)
- break;
- }
+afr_unlock_now(call_frame_t *frame, xlator_t *this)
+{
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+ int child_index = 0;
+ int lockee_num = 0;
+ int i = -1;
+
+ local = frame->local;
+ int_lock = &local->internal_lock;
+ priv = this->private;
+
+ call_count = afr_lockee_locked_nodes_count(int_lock);
+
+ int_lock->lk_call_count = call_count;
+
+ if (!call_count) {
+ gf_msg_trace(this->name, 0, "No internal locks unlocked");
+ int_lock->lock_cbk(frame, this);
+ goto out;
+ }
+
+ for (i = 0; i < int_lock->lockee_count * priv->child_count; i++) {
+ lockee_num = i / priv->child_count;
+ child_index = i % priv->child_count;
+ if (int_lock->lockee[lockee_num].locked_nodes[child_index] &
+ LOCKED_YES) {
+ afr_internal_lock_wind(frame, afr_unlock_common_cbk,
+ (void *)(long)i, child_index, lockee_num,
+ _gf_false, _gf_true);
+ if (!--call_count)
+ break;
}
+ }
out:
- return 0;
-
+ return 0;
}
static int32_t
-afr_lock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int cky = (long) cookie;
- int child_index = 0;
- int lockee_no = 0;
-
- priv = this->private;
- local = frame->local;
- int_lock = &local->internal_lock;
-
- child_index = ((int)cky) % priv->child_count;
- lockee_no = ((int)cky) / priv->child_count;
-
- LOCK (&frame->lock);
- {
- if (op_ret == -1) {
- if (op_errno == ENOSYS) {
- /* return ENOTSUP */
- gf_msg (this->name, GF_LOG_ERROR, ENOSYS,
- AFR_MSG_LOCK_XLATOR_NOT_LOADED,
- "subvolume does not support locking. "
- "please load features/locks xlator on server");
- local->op_ret = op_ret;
- int_lock->lock_op_ret = op_ret;
- }
-
- local->op_errno = op_errno;
- int_lock->lock_op_errno = op_errno;
- }
-
- int_lock->lk_attempted_count++;
- }
- UNLOCK (&frame->lock);
-
- if ((op_ret == -1) &&
- (op_errno == ENOSYS)) {
- afr_unlock (frame, this);
- } else {
- if (op_ret == 0) {
- if (local->transaction.type == AFR_ENTRY_TRANSACTION ||
- local->transaction.type == AFR_ENTRY_RENAME_TRANSACTION) {
- int_lock->lockee[lockee_no].locked_nodes[child_index] |= LOCKED_YES;
- int_lock->lockee[lockee_no].locked_count++;
- int_lock->entrylk_lock_count++;
- } else {
- int_lock->locked_nodes[child_index] |= LOCKED_YES;
- int_lock->lock_count++;
- }
+afr_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int cky = (long)cookie;
+ int child_index = 0;
+ int lockee_num = 0;
+
+ priv = this->private;
+ local = frame->local;
+ int_lock = &local->internal_lock;
+
+ child_index = ((int)cky) % priv->child_count;
+ lockee_num = ((int)cky) / priv->child_count;
+
+ LOCK(&frame->lock);
+ {
+ if (op_ret == -1) {
+ if (op_errno == ENOSYS) {
+ /* return ENOTSUP */
+ gf_msg(this->name, GF_LOG_ERROR, ENOSYS,
+ AFR_MSG_LOCK_XLATOR_NOT_LOADED,
+ "subvolume does not support locking. "
+ "please load features/locks xlator on server");
+ local->op_ret = op_ret;
+ int_lock->lock_op_ret = op_ret;
+ }
+
+ local->op_errno = op_errno;
+ int_lock->lock_op_errno = op_errno;
+ }
+
+ int_lock->lk_attempted_count++;
+ }
+ UNLOCK(&frame->lock);
+
+ if ((op_ret == -1) && (op_errno == ENOSYS)) {
+ afr_unlock_now(frame, this);
+ } else {
+ if (op_ret == 0) {
+ int_lock->lockee[lockee_num]
+ .locked_nodes[child_index] |= LOCKED_YES;
+ int_lock->lockee[lockee_num].locked_count++;
+ int_lock->lock_count++;
+ if (local->transaction.type == AFR_DATA_TRANSACTION) {
+ LOCK(&local->inode->lock);
+ {
+ local->inode_ctx->lock_count++;
}
- afr_lock_blocking (frame, this, cky + 1);
+ UNLOCK(&local->inode->lock);
+ }
}
+ afr_lock_blocking(frame, this, cky + 1);
+ }
- return 0;
-}
-
-static int32_t
-afr_blocking_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- AFR_TRACE_INODELK_OUT (frame, this, AFR_INODELK_TRANSACTION,
- AFR_LOCK_OP, NULL, op_ret,
- op_errno, (long) cookie);
-
- afr_lock_cbk (frame, cookie, this, op_ret, op_errno, xdata);
- return 0;
-
-}
-
-static int32_t
-afr_blocking_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- AFR_TRACE_ENTRYLK_OUT (frame, this, AFR_ENTRYLK_TRANSACTION,
- AFR_LOCK_OP, NULL, op_ret,
- op_errno, (long)cookie);
-
- afr_lock_cbk (frame, cookie, this, op_ret, op_errno, xdata);
- return 0;
-}
-
-static int
-afr_copy_locked_nodes (call_frame_t *frame, xlator_t *this)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_inodelk_t *inodelk = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- priv = this->private;
- local = frame->local;
- int_lock = &local->internal_lock;
-
- switch (local->transaction.type) {
- case AFR_DATA_TRANSACTION:
- case AFR_METADATA_TRANSACTION:
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
- memcpy (inodelk->locked_nodes, int_lock->locked_nodes,
- sizeof (*inodelk->locked_nodes) * priv->child_count);
- inodelk->lock_count = int_lock->lock_count;
- break;
-
- case AFR_ENTRY_RENAME_TRANSACTION:
- case AFR_ENTRY_TRANSACTION:
- /*entrylk_count is being used in both non-blocking and blocking
- * modes */
- break;
- }
-
- return 0;
-
+ return 0;
}
static gf_boolean_t
-afr_is_entrylk (afr_internal_lock_t *int_lock,
- afr_transaction_type trans_type)
+_is_lock_wind_needed(afr_local_t *local, int child_index)
{
- gf_boolean_t is_entrylk = _gf_false;
-
- if ((int_lock->transaction_lk_type == AFR_SELFHEAL_LK) &&
- int_lock->selfheal_lk_type == AFR_ENTRY_SELF_HEAL_LK) {
-
- is_entrylk = _gf_true;
-
- } else if ((int_lock->transaction_lk_type == AFR_TRANSACTION_LK) &&
- (trans_type == AFR_ENTRY_TRANSACTION ||
- trans_type == AFR_ENTRY_RENAME_TRANSACTION)) {
-
- is_entrylk = _gf_true;
+ if (!local->child_up[child_index])
+ return _gf_false;
- } else {
- is_entrylk = _gf_false;
- }
-
- return is_entrylk;
+ return _gf_true;
}
static gf_boolean_t
-_is_lock_wind_needed (afr_local_t *local, int child_index)
-{
- if (!local->child_up[child_index])
- return _gf_false;
-
- return _gf_true;
+is_blocking_locks_count_sufficient(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_internal_lock_t *int_lock = NULL;
+ int child = 0;
+ int nlockee = 0;
+ int lockee_count = 0;
+ gf_boolean_t ret = _gf_true;
+
+ local = frame->local;
+ priv = this->private;
+ int_lock = &local->internal_lock;
+ lockee_count = int_lock->lockee_count;
+
+ if (int_lock->lock_count == 0) {
+ afr_log_locks_failure(frame, "any subvolume", "lock",
+ int_lock->lock_op_errno);
+ return _gf_false;
+ }
+ /* For FOPS that take multiple sets of locks (mkdir, rename),
+ * there must be at least one brick on which the locks from
+ * all lock sets were successful. */
+ for (child = 0; child < priv->child_count; child++) {
+ ret = _gf_true;
+ for (nlockee = 0; nlockee < lockee_count; nlockee++) {
+ if (!(int_lock->lockee[nlockee].locked_nodes[child] & LOCKED_YES))
+ ret = _gf_false;
+ }
+ if (ret)
+ return ret;
+ }
+ if (!ret)
+ afr_log_locks_failure(frame, "all", "lock", int_lock->lock_op_errno);
+
+ return ret;
}
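
is_blocking_locks_count_sufficient encodes the rule spelled out in the comment above: with multiple lock sets (for example the two parent-directory locks a rename takes), blocking locks only count as a success if at least one brick was granted every set. A self-contained model of that check, with a hypothetical brick/lock-set matrix:

```
/* Self-contained model of the sufficiency check: with LOCK_SETS lock
 * sets over BRICKS bricks, blocking locks are "sufficient" only if
 * some brick holds every lock set. Sample data is illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define BRICKS 3
#define LOCK_SETS 2

static bool
some_brick_holds_all(const bool locked[LOCK_SETS][BRICKS])
{
        for (int b = 0; b < BRICKS; b++) {
                bool all = true;

                for (int s = 0; s < LOCK_SETS; s++)
                        all = all && locked[s][b];
                if (all)
                        return true;
        }
        return false;
}

int
main(void)
{
        /* rename-style fop: lock set 0 = src parent dir, set 1 = dst
         * parent dir; brick 1 was granted both, so the fop can proceed. */
        const bool locked[LOCK_SETS][BRICKS] = {
                { true, true, false },
                { false, true, true },
        };

        printf("sufficient = %d\n", some_brick_holds_all(locked));
        return 0;
}
```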
int
-afr_lock_blocking (call_frame_t *frame, xlator_t *this, int cookie)
+afr_lock_blocking(call_frame_t *frame, xlator_t *this, int cookie)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_inodelk_t *inodelk = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- struct gf_flock flock = {0,};
- uint64_t ctx = 0;
- int ret = 0;
- int child_index = 0;
- int lockee_no = 0;
- gf_boolean_t is_entrylk = _gf_false;
-
- local = frame->local;
- int_lock = &local->internal_lock;
- priv = this->private;
- child_index = cookie % priv->child_count;
- lockee_no = cookie / priv->child_count;
- is_entrylk = afr_is_entrylk (int_lock, local->transaction.type);
-
-
- if (!is_entrylk) {
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
- flock.l_start = inodelk->flock.l_start;
- flock.l_len = inodelk->flock.l_len;
- flock.l_type = inodelk->flock.l_type;
- }
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ uint64_t ctx = 0;
+ int ret = 0;
+ int child_index = 0;
+ int lockee_num = 0;
- if (local->fd) {
- ret = fd_ctx_get (local->fd, this, &ctx);
+ local = frame->local;
+ int_lock = &local->internal_lock;
+ priv = this->private;
+ child_index = cookie % priv->child_count;
+ lockee_num = cookie / priv->child_count;
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_FD_CTX_GET_FAILED,
- "unable to get fd ctx for fd=%p",
- local->fd);
+ if (local->fd) {
+ ret = fd_ctx_get(local->fd, this, &ctx);
- local->op_ret = -1;
- int_lock->lock_op_ret = -1;
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_FD_CTX_GET_FAILED,
+ "unable to get fd ctx for fd=%p", local->fd);
- afr_copy_locked_nodes (frame, this);
+ local->op_ret = -1;
+ int_lock->lock_op_ret = -1;
- afr_unlock (frame, this);
+ afr_unlock_now(frame, this);
- return 0;
- }
+ return 0;
}
+ }
- if (int_lock->lk_expected_count == int_lock->lk_attempted_count) {
- if ((is_entrylk && int_lock->entrylk_lock_count == 0) ||
- (!is_entrylk && int_lock->lock_count == 0)) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_BLOCKING_LKS_FAILED,
- "unable to lock on even one child");
-
- local->op_ret = -1;
- int_lock->lock_op_ret = -1;
-
- afr_copy_locked_nodes (frame, this);
+ if (int_lock->lk_expected_count == int_lock->lk_attempted_count) {
+ if (!is_blocking_locks_count_sufficient(frame, this)) {
+ local->op_ret = -1;
+ int_lock->lock_op_ret = -1;
- afr_unlock(frame, this);
+ afr_unlock_now(frame, this);
- return 0;
- }
+ return 0;
}
+ }
- if (int_lock->lk_expected_count == int_lock->lk_attempted_count) {
- /* we're done locking */
-
- gf_msg_debug (this->name, 0,
- "we're done locking");
+ if (int_lock->lk_expected_count == int_lock->lk_attempted_count) {
+ /* we're done locking */
- afr_copy_locked_nodes (frame, this);
+ gf_msg_debug(this->name, 0, "we're done locking");
- int_lock->lock_op_ret = 0;
- int_lock->lock_cbk (frame, this);
- return 0;
- }
-
- if (!_is_lock_wind_needed (local, child_index)) {
- afr_lock_blocking (frame, this, cookie + 1);
- return 0;
- }
-
- switch (local->transaction.type) {
- case AFR_DATA_TRANSACTION:
- case AFR_METADATA_TRANSACTION:
-
- if (local->fd) {
- AFR_TRACE_INODELK_IN (frame, this,
- AFR_INODELK_TRANSACTION,
- AFR_LOCK_OP, &flock, F_SETLKW,
- child_index);
-
- STACK_WIND_COOKIE (frame, afr_blocking_inodelk_cbk,
- (void *) (long) child_index,
- priv->children[child_index],
- priv->children[child_index]->fops->finodelk,
- int_lock->domain, local->fd,
- F_SETLKW, &flock, NULL);
-
- } else {
- AFR_TRACE_INODELK_IN (frame, this,
- AFR_INODELK_TRANSACTION,
- AFR_LOCK_OP, &flock, F_SETLKW,
- child_index);
-
- STACK_WIND_COOKIE (frame, afr_blocking_inodelk_cbk,
- (void *) (long) child_index,
- priv->children[child_index],
- priv->children[child_index]->fops->inodelk,
- int_lock->domain, &local->loc,
- F_SETLKW, &flock, NULL);
- }
+ int_lock->lock_op_ret = 0;
+ int_lock->lock_cbk(frame, this);
+ return 0;
+ }
- break;
+ if (!_is_lock_wind_needed(local, child_index)) {
+ afr_lock_blocking(frame, this, cookie + 1);
+ return 0;
+ }
- case AFR_ENTRY_RENAME_TRANSACTION:
- case AFR_ENTRY_TRANSACTION:
- /*Accounting for child_index increments on 'down'
- *and 'fd-less' children */
-
- if (local->fd) {
- AFR_TRACE_ENTRYLK_IN (frame, this, AFR_ENTRYLK_TRANSACTION,
- AFR_LOCK_OP,
- int_lock->lockee[lockee_no].basename,
- cookie);
-
- STACK_WIND_COOKIE (frame, afr_blocking_entrylk_cbk,
- (void *) (long) cookie,
- priv->children[child_index],
- priv->children[child_index]->fops->fentrylk,
- int_lock->domain, local->fd,
- int_lock->lockee[lockee_no].basename,
- ENTRYLK_LOCK, ENTRYLK_WRLCK, NULL);
- } else {
- AFR_TRACE_ENTRYLK_IN (frame, this,
- AFR_ENTRYLK_TRANSACTION,
- AFR_LOCK_OP, local->transaction.basename,
- child_index);
-
- STACK_WIND_COOKIE (frame, afr_blocking_entrylk_cbk,
- (void *) (long) cookie,
- priv->children[child_index],
- priv->children[child_index]->fops->entrylk,
- int_lock->domain,
- &int_lock->lockee[lockee_no].loc,
- int_lock->lockee[lockee_no].basename,
- ENTRYLK_LOCK, ENTRYLK_WRLCK, NULL);
- }
+ afr_internal_lock_wind(frame, afr_lock_cbk, (void *)(long)cookie,
+ child_index, lockee_num, _gf_true, _gf_false);
- break;
- }
-
- return 0;
+ return 0;
}
int32_t
-afr_blocking_lock (call_frame_t *frame, xlator_t *this)
+afr_blocking_lock(call_frame_t *frame, xlator_t *this)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int up_count = 0;
-
- priv = this->private;
- local = frame->local;
- int_lock = &local->internal_lock;
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int up_count = 0;
- switch (local->transaction.type) {
- case AFR_DATA_TRANSACTION:
- case AFR_METADATA_TRANSACTION:
- initialize_inodelk_variables (frame, this);
- break;
+ priv = this->private;
+ local = frame->local;
+ int_lock = &local->internal_lock;
- case AFR_ENTRY_RENAME_TRANSACTION:
- case AFR_ENTRY_TRANSACTION:
- up_count = AFR_COUNT (local->child_up, priv->child_count);
- int_lock->lk_call_count = int_lock->lk_expected_count
- = (int_lock->lockee_count *
- up_count);
- initialize_entrylk_variables (frame, this);
- break;
- }
+ up_count = AFR_COUNT(local->child_up, priv->child_count);
+ int_lock->lk_call_count = int_lock->lk_expected_count =
+ (int_lock->lockee_count * up_count);
+ initialize_internal_lock_variables(frame, this);
- afr_lock_blocking (frame, this, 0);
+ afr_lock_blocking(frame, this, 0);
- return 0;
+ return 0;
}
static int32_t
-afr_nonblocking_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
- int call_count = 0;
- int child_index = (long) cookie;
- int copies = 0;
- int index = 0;
- int lockee_no = 0;
- afr_private_t *priv = NULL;
-
- priv = this->private;
-
- copies = priv->child_count;
- index = child_index % copies;
- lockee_no = child_index / copies;
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- AFR_TRACE_ENTRYLK_OUT (frame, this, AFR_ENTRYLK_TRANSACTION,
- AFR_LOCK_OP,
- int_lock->lockee[lockee_no].basename, op_ret,
- op_errno, (long) cookie);
-
- LOCK (&frame->lock);
- {
- if (op_ret < 0 ) {
- if (op_errno == ENOSYS) {
- /* return ENOTSUP */
- gf_msg (this->name, GF_LOG_ERROR,
- ENOSYS, AFR_MSG_LOCK_XLATOR_NOT_LOADED,
- "subvolume does not support "
- "locking. please load features/locks"
- " xlator on server");
- local->op_ret = op_ret;
- int_lock->lock_op_ret = op_ret;
-
- int_lock->lock_op_errno = op_errno;
- local->op_errno = op_errno;
- }
- } else if (op_ret == 0) {
- int_lock->lockee[lockee_no].locked_nodes[index] |= \
- LOCKED_YES;
- int_lock->lockee[lockee_no].locked_count++;
- int_lock->entrylk_lock_count++;
- }
-
- call_count = --int_lock->lk_call_count;
- }
- UNLOCK (&frame->lock);
-
- if (call_count == 0) {
- gf_msg_trace (this->name, 0,
- "Last locking reply received");
- /* all locks successful. Proceed to call FOP */
- if (int_lock->entrylk_lock_count ==
- int_lock->lk_expected_count) {
- gf_msg_trace (this->name, 0,
- "All servers locked. Calling the cbk");
- int_lock->lock_op_ret = 0;
- int_lock->lock_cbk (frame, this);
- }
- /* Not all locks were successful. Unlock and try locking
- again, this time with serially blocking locks */
- else {
- gf_msg_trace (this->name, 0,
- "%d servers locked. Trying again "
- "with blocking calls",
- int_lock->lock_count);
-
- afr_unlock(frame, this);
- }
- }
-
- return 0;
-}
-
-int
-afr_nonblocking_entrylk (call_frame_t *frame, xlator_t *this)
+afr_nb_internal_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
- int copies = 0;
- int index = 0;
- int lockee_no = 0;
- int32_t call_count = 0;
- int i = 0;
-
- local = frame->local;
- int_lock = &local->internal_lock;
- priv = this->private;
-
- copies = priv->child_count;
- initialize_entrylk_variables (frame, this);
-
- if (local->fd) {
- fd_ctx = afr_fd_ctx_get (local->fd, this);
- if (!fd_ctx) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_FD_CTX_GET_FAILED,
- "unable to get fd ctx for fd=%p",
- local->fd);
-
- local->op_ret = -1;
- int_lock->lock_op_ret = -1;
- local->op_errno = EINVAL;
- int_lock->lock_op_errno = EINVAL;
-
- afr_unlock (frame, this);
- return -1;
- }
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+ int call_count = 0;
+ int child_index = 0;
+ int lockee_num = 0;
+ afr_private_t *priv = NULL;
- call_count = int_lock->lockee_count * internal_lock_count (frame, this);
- int_lock->lk_call_count = call_count;
- int_lock->lk_expected_count = call_count;
+ priv = this->private;
- if (!call_count) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_INFO_COMMON,
- "fd not open on any subvolumes. aborting.");
- afr_unlock (frame, this);
- goto out;
- }
+ child_index = ((long)cookie) % priv->child_count;
+ lockee_num = ((long)cookie) / priv->child_count;
- /* Send non-blocking entrylk calls only on up children
- and where the fd has been opened */
- for (i = 0; i < int_lock->lockee_count*priv->child_count; i++) {
- index = i%copies;
- lockee_no = i/copies;
- if (local->child_up[index]) {
- AFR_TRACE_ENTRYLK_IN (frame, this, AFR_ENTRYLK_NB_TRANSACTION,
- AFR_LOCK_OP,
- int_lock->lockee[lockee_no].basename,
- i);
-
- STACK_WIND_COOKIE (frame, afr_nonblocking_entrylk_cbk,
- (void *) (long) i,
- priv->children[index],
- priv->children[index]->fops->fentrylk,
- this->name, local->fd,
- int_lock->lockee[lockee_no].basename,
- ENTRYLK_LOCK_NB, ENTRYLK_WRLCK,
- NULL);
- if (!--call_count)
- break;
- }
- }
- } else {
- call_count = int_lock->lockee_count * internal_lock_count (frame, this);
- int_lock->lk_call_count = call_count;
- int_lock->lk_expected_count = call_count;
-
- for (i = 0; i < int_lock->lockee_count*priv->child_count; i++) {
- index = i%copies;
- lockee_no = i/copies;
- if (local->child_up[index]) {
- AFR_TRACE_ENTRYLK_IN (frame, this, AFR_ENTRYLK_NB_TRANSACTION,
- AFR_LOCK_OP,
- int_lock->lockee[lockee_no].basename,
- i);
-
- STACK_WIND_COOKIE (frame, afr_nonblocking_entrylk_cbk,
- (void *) (long) i,
- priv->children[index],
- priv->children[index]->fops->entrylk,
- this->name, &int_lock->lockee[lockee_no].loc,
- int_lock->lockee[lockee_no].basename,
- ENTRYLK_LOCK_NB, ENTRYLK_WRLCK,
- NULL);
-
- if (!--call_count)
- break;
- }
- }
- }
-out:
- return 0;
-}
+ local = frame->local;
+ int_lock = &local->internal_lock;
-int32_t
-afr_nonblocking_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_inodelk_t *inodelk = NULL;
- afr_local_t *local = NULL;
- int call_count = 0;
- int child_index = (long) cookie;
- afr_fd_ctx_t *fd_ctx = NULL;
-
-
- local = frame->local;
- int_lock = &local->internal_lock;
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
-
- AFR_TRACE_INODELK_OUT (frame, this, AFR_INODELK_NB_TRANSACTION,
- AFR_LOCK_OP, NULL, op_ret,
- op_errno, (long) cookie);
-
- if (local->fd)
- fd_ctx = afr_fd_ctx_get (local->fd, this);
-
- LOCK (&frame->lock);
+ if (op_ret == 0 && local->transaction.type == AFR_DATA_TRANSACTION) {
+ LOCK(&local->inode->lock);
{
- if (op_ret < 0) {
- if (op_errno == ENOSYS) {
- /* return ENOTSUP */
- gf_msg (this->name, GF_LOG_ERROR, ENOSYS,
- AFR_MSG_LOCK_XLATOR_NOT_LOADED,
- "subvolume does not support "
- "locking. please load features/locks"
- " xlator on server");
- local->op_ret = op_ret;
- int_lock->lock_op_ret = op_ret;
- int_lock->lock_op_errno = op_errno;
- local->op_errno = op_errno;
- }
- if (local->transaction.eager_lock)
- local->transaction.eager_lock[child_index] = 0;
- } else {
- inodelk->locked_nodes[child_index] |= LOCKED_YES;
- inodelk->lock_count++;
-
- if (local->transaction.eager_lock &&
- local->transaction.eager_lock[child_index] &&
- local->fd) {
- /* piggybacked */
- if (op_ret == 1) {
- /* piggybacked */
- } else if (op_ret == 0) {
- /* lock acquired from server */
- fd_ctx->lock_acquired[child_index]++;
- }
- }
- }
-
- call_count = --int_lock->lk_call_count;
- }
- UNLOCK (&frame->lock);
-
- if (call_count == 0) {
- gf_msg_trace (this->name, 0,
- "Last inode locking reply received");
- /* all locks successful. Proceed to call FOP */
- if (inodelk->lock_count == int_lock->lk_expected_count) {
- gf_msg_trace (this->name, 0,
- "All servers locked. Calling the cbk");
- int_lock->lock_op_ret = 0;
- int_lock->lock_cbk (frame, this);
- }
- /* Not all locks were successful. Unlock and try locking
- again, this time with serially blocking locks */
- else {
- gf_msg_trace (this->name, 0,
- "%d servers locked. "
- "Trying again with blocking calls",
- int_lock->lock_count);
-
- afr_unlock(frame, this);
- }
+ local->inode_ctx->lock_count++;
}
+ UNLOCK(&local->inode->lock);
+ }
- return 0;
+ LOCK(&frame->lock);
+ {
+ if (op_ret < 0) {
+ if (op_errno == ENOSYS) {
+ /* return ENOTSUP */
+ gf_msg(this->name, GF_LOG_ERROR, ENOSYS,
+ AFR_MSG_LOCK_XLATOR_NOT_LOADED,
+ "subvolume does not support "
+ "locking. please load features/locks"
+ " xlator on server");
+ local->op_ret = op_ret;
+ int_lock->lock_op_ret = op_ret;
+
+ int_lock->lock_op_errno = op_errno;
+ local->op_errno = op_errno;
+ }
+ } else if (op_ret == 0) {
+ int_lock->lockee[lockee_num]
+ .locked_nodes[child_index] |= LOCKED_YES;
+ int_lock->lockee[lockee_num].locked_count++;
+ int_lock->lock_count++;
+ }
+
+ call_count = --int_lock->lk_call_count;
+ }
+ UNLOCK(&frame->lock);
+
+ if (call_count == 0) {
+ gf_msg_trace(this->name, 0, "Last locking reply received");
+ /* all locks successful. Proceed to call FOP */
+ if (int_lock->lock_count == int_lock->lk_expected_count) {
+ gf_msg_trace(this->name, 0, "All servers locked. Calling the cbk");
+ int_lock->lock_op_ret = 0;
+ int_lock->lock_cbk(frame, this);
+ }
+ /* Not all locks were successful. Unlock and try locking
+ again, this time with serially blocking locks */
+ else {
+ gf_msg_trace(this->name, 0,
+ "%d servers locked. Trying again "
+ "with blocking calls",
+ int_lock->lock_count);
+
+ afr_unlock_now(frame, this);
+ }
+ }
+
+ return 0;
}
int
-afr_nonblocking_inodelk (call_frame_t *frame, xlator_t *this)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_inodelk_t *inodelk = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
- int32_t call_count = 0;
- int i = 0;
- int ret = 0;
- struct gf_flock flock = {0,};
- struct gf_flock full_flock = {0,};
- struct gf_flock *flock_use = NULL;
- int piggyback = 0;
-
- local = frame->local;
- int_lock = &local->internal_lock;
- priv = this->private;
-
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
-
- flock.l_start = inodelk->flock.l_start;
- flock.l_len = inodelk->flock.l_len;
- flock.l_type = inodelk->flock.l_type;
-
- full_flock.l_type = inodelk->flock.l_type;
-
- initialize_inodelk_variables (frame, this);
-
- if (local->fd) {
- fd_ctx = afr_fd_ctx_get (local->fd, this);
- if (!fd_ctx) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_FD_CTX_GET_FAILED,
- "unable to get fd ctx for fd=%p",
- local->fd);
-
- local->op_ret = -1;
- int_lock->lock_op_ret = -1;
- local->op_errno = EINVAL;
- int_lock->lock_op_errno = EINVAL;
-
- afr_unlock (frame, this);
- ret = -1;
- goto out;
- }
-
- call_count = internal_lock_count (frame, this);
- int_lock->lk_call_count = call_count;
- int_lock->lk_expected_count = call_count;
-
- if (!call_count) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_ALL_SUBVOLS_DOWN,
- "All bricks are down, aborting.");
- afr_unlock (frame, this);
- goto out;
- }
-
- /* Send non-blocking inodelk calls only on up children
- and where the fd has been opened */
- for (i = 0; i < priv->child_count; i++) {
- if (!local->child_up[i])
- continue;
-
- flock_use = &flock;
- if (!local->transaction.eager_lock_on) {
- goto wind;
- }
-
- piggyback = 0;
- local->transaction.eager_lock[i] = 1;
-
- afr_set_delayed_post_op (frame, this);
-
- LOCK (&local->fd->lock);
- {
- if (fd_ctx->lock_acquired[i]) {
- fd_ctx->lock_piggyback[i]++;
- piggyback = 1;
- }
- }
- UNLOCK (&local->fd->lock);
-
- if (piggyback) {
- /* (op_ret == 1) => indicate piggybacked lock */
- afr_nonblocking_inodelk_cbk (frame, (void *) (long) i,
- this, 1, 0, NULL);
- if (!--call_count)
- break;
- continue;
- }
- flock_use = &full_flock;
- wind:
- AFR_TRACE_INODELK_IN (frame, this,
- AFR_INODELK_NB_TRANSACTION,
- AFR_LOCK_OP, flock_use, F_SETLK, i);
-
- STACK_WIND_COOKIE (frame, afr_nonblocking_inodelk_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->finodelk,
- int_lock->domain, local->fd,
- F_SETLK, flock_use, NULL);
-
- if (!--call_count)
- break;
- }
- } else {
- call_count = internal_lock_count (frame, this);
- int_lock->lk_call_count = call_count;
- int_lock->lk_expected_count = call_count;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->child_up[i])
- continue;
- AFR_TRACE_INODELK_IN (frame, this,
- AFR_INODELK_NB_TRANSACTION,
- AFR_LOCK_OP, &flock, F_SETLK, i);
-
- STACK_WIND_COOKIE (frame, afr_nonblocking_inodelk_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->inodelk,
- int_lock->domain, &local->loc,
- F_SETLK, &flock, NULL);
-
- if (!--call_count)
- break;
- }
+afr_lock_nonblocking(call_frame_t *frame, xlator_t *this)
+{
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int child = 0;
+ int lockee_num = 0;
+ int32_t call_count = 0;
+ int i = 0;
+ int ret = 0;
+
+ local = frame->local;
+ int_lock = &local->internal_lock;
+ priv = this->private;
+
+ initialize_internal_lock_variables(frame, this);
+
+ if (local->fd) {
+ fd_ctx = afr_fd_ctx_get(local->fd, this);
+ if (!fd_ctx) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_FD_CTX_GET_FAILED,
+ "unable to get fd ctx for fd=%p", local->fd);
+
+ local->op_ret = -1;
+ int_lock->lock_op_ret = -1;
+ local->op_errno = EINVAL;
+ int_lock->lock_op_errno = EINVAL;
+
+ afr_unlock_now(frame, this);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ call_count = int_lock->lockee_count * internal_lock_count(frame, this);
+ int_lock->lk_call_count = call_count;
+ int_lock->lk_expected_count = call_count;
+
+ if (!call_count) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_INFO_COMMON,
+ "fd not open on any subvolumes. aborting.");
+ afr_unlock_now(frame, this);
+ goto out;
+ }
+
+ /* Send non-blocking lock calls only on up children
+ and where the fd has been opened */
+ for (i = 0; i < int_lock->lockee_count * priv->child_count; i++) {
+ child = i % priv->child_count;
+ lockee_num = i / priv->child_count;
+ if (local->child_up[child]) {
+ afr_internal_lock_wind(frame, afr_nb_internal_lock_cbk,
+ (void *)(long)i, child, lockee_num,
+ _gf_false, _gf_false);
+ if (!--call_count)
+ break;
}
+ }
out:
- return ret;
+ return ret;
}
int32_t
-afr_unlock (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
-
- local = frame->local;
-
- if (transaction_lk_op (local)) {
- if (is_afr_lock_transaction (local))
- afr_unlock_inodelk (frame, this);
- else
- afr_unlock_entrylk (frame, this);
-
- } else {
- if (is_afr_lock_selfheal (local))
- afr_unlock_inodelk (frame, this);
- else
- afr_unlock_entrylk (frame, this);
- }
-
+afr_unlock(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_lock_t *lock = NULL;
+
+ local = frame->local;
+
+ if (!local->transaction.eager_lock_on)
+ goto out;
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ LOCK(&local->inode->lock);
+ {
+ list_del_init(&local->transaction.owner_list);
+ if (list_empty(&lock->owners) && list_empty(&lock->post_op)) {
+ local->transaction.do_eager_unlock = _gf_true;
+ /*TODO: Need to get metadata use on_disk and inherit/uninherit
+ *GF_ASSERT (!local->inode_ctx->on_disk[local->transaction.type]);
+ *GF_ASSERT (!local->inode_ctx->inherited[local->transaction.type]);
+ */
+ GF_ASSERT(lock->release);
+ }
+ }
+ UNLOCK(&local->inode->lock);
+ if (!local->transaction.do_eager_unlock) {
+ local->internal_lock.lock_cbk(frame, this);
return 0;
-}
-
-int
-afr_lk_transfer_datalock (call_frame_t *dst, call_frame_t *src, char *dom,
- unsigned int child_count)
-{
- afr_local_t *dst_local = NULL;
- afr_local_t *src_local = NULL;
- afr_internal_lock_t *dst_lock = NULL;
- afr_internal_lock_t *src_lock = NULL;
- afr_inodelk_t *dst_inodelk = NULL;
- afr_inodelk_t *src_inodelk = NULL;
- int ret = -1;
-
- src_local = src->local;
- src_lock = &src_local->internal_lock;
- src_inodelk = afr_get_inodelk (src_lock, dom);
- dst_local = dst->local;
- dst_lock = &dst_local->internal_lock;
- dst_inodelk = afr_get_inodelk (dst_lock, dom);
- if (!dst_inodelk || !src_inodelk)
- goto out;
- if (src_inodelk->locked_nodes) {
- memcpy (dst_inodelk->locked_nodes, src_inodelk->locked_nodes,
- sizeof (*dst_inodelk->locked_nodes) * child_count);
- memset (src_inodelk->locked_nodes, 0,
- sizeof (*src_inodelk->locked_nodes) * child_count);
- }
+ }
- dst_lock->transaction_lk_type = src_lock->transaction_lk_type;
- dst_lock->selfheal_lk_type = src_lock->selfheal_lk_type;
- dst_inodelk->lock_count = src_inodelk->lock_count;
- src_inodelk->lock_count = 0;
- ret = 0;
out:
- return ret;
+ afr_unlock_now(frame, this);
+ return 0;
}
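The hunk above collapses the separate inodelk/entrylk nonblocking paths into afr_lock_nonblocking() and afr_nb_internal_lock_cbk(): nonblocking locks are wound to every up child, and if fewer than lk_expected_count of them succeed, afr_unlock_now() releases everything so the transaction can retry with serially blocking locks. The stand-alone sketch below illustrates that acquire-all-or-fall-back pattern only by analogy, with pthread mutexes standing in for per-brick locks; none of the names here are GlusterFS code.
```
/* Stand-alone analogy, not GlusterFS code: pthread mutexes stand in for
 * per-brick locks. Try non-blocking locks on every replica; if not all of
 * them succeed, release what was taken and fall back to blocking locks,
 * mirroring the afr_lock_nonblocking() -> afr_unlock_now() -> blocking
 * retry sequence described in the comments above. */
#include <pthread.h>
#include <stdio.h>

#define REPLICAS 3

static pthread_mutex_t brick_lock[REPLICAS] = {PTHREAD_MUTEX_INITIALIZER,
                                               PTHREAD_MUTEX_INITIALIZER,
                                               PTHREAD_MUTEX_INITIALIZER};

static int
lock_all_nonblocking(int *held)
{
    int lock_count = 0;

    for (int i = 0; i < REPLICAS; i++) {
        held[i] = (pthread_mutex_trylock(&brick_lock[i]) == 0);
        if (held[i])
            lock_count++;
    }
    if (lock_count == REPLICAS)
        return 0; /* all locks acquired: proceed with the FOP */

    /* Partial success: release everything and let the caller retry
     * with blocking locks. */
    for (int i = 0; i < REPLICAS; i++)
        if (held[i])
            pthread_mutex_unlock(&brick_lock[i]);
    return -1;
}

int
main(void)
{
    int held[REPLICAS] = {0};

    if (lock_all_nonblocking(held) != 0) {
        /* Blocking fallback, taken one lock at a time ("serially"). */
        for (int i = 0; i < REPLICAS; i++)
            pthread_mutex_lock(&brick_lock[i]);
    }
    printf("all replica locks held\n");
    for (int i = 0; i < REPLICAS; i++)
        pthread_mutex_unlock(&brick_lock[i]);
    return 0;
}
```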
diff --git a/xlators/cluster/afr/src/afr-mem-types.h b/xlators/cluster/afr/src/afr-mem-types.h
index 7f7962013d7..816065fb57a 100644
--- a/xlators/cluster/afr/src/afr-mem-types.h
+++ b/xlators/cluster/afr/src/afr-mem-types.h
@@ -8,45 +8,31 @@
cases as published by the Free Software Foundation.
*/
-
#ifndef __AFR_MEM_TYPES_H__
#define __AFR_MEM_TYPES_H__
-#include "mem-types.h"
+#include <glusterfs/mem-types.h>
enum gf_afr_mem_types_ {
- gf_afr_mt_iovec = gf_common_mt_end + 1,
- gf_afr_mt_afr_fd_ctx_t,
- gf_afr_mt_afr_private_t,
- gf_afr_mt_int32_t,
- gf_afr_mt_char,
- gf_afr_mt_xattr_key,
- gf_afr_mt_dict_t,
- gf_afr_mt_xlator_t,
- gf_afr_mt_iatt,
- gf_afr_mt_int,
- gf_afr_mt_afr_node_character,
- gf_afr_mt_sh_diff_loop_state,
- gf_afr_mt_uint8_t,
- gf_afr_mt_loc_t,
- gf_afr_mt_entry_name,
- gf_afr_mt_pump_priv,
- gf_afr_mt_locked_fd,
- gf_afr_mt_inode_ctx_t,
- gf_afr_fd_paused_call_t,
- gf_afr_mt_crawl_data_t,
- gf_afr_mt_brick_pos_t,
- gf_afr_mt_shd_bool_t,
- gf_afr_mt_shd_timer_t,
- gf_afr_mt_shd_event_t,
- gf_afr_mt_time_t,
- gf_afr_mt_pos_data_t,
- gf_afr_mt_reply_t,
- gf_afr_mt_subvol_healer_t,
- gf_afr_mt_spbc_timeout_t,
- gf_afr_mt_spb_status_t,
- gf_afr_mt_empty_brick_t,
- gf_afr_mt_end
+ gf_afr_mt_afr_fd_ctx_t = gf_common_mt_end + 1,
+ gf_afr_mt_afr_private_t,
+ gf_afr_mt_int32_t,
+ gf_afr_mt_char,
+ gf_afr_mt_xattr_key,
+ gf_afr_mt_dict_t,
+ gf_afr_mt_xlator_t,
+ gf_afr_mt_afr_node_character,
+ gf_afr_mt_inode_ctx_t,
+ gf_afr_mt_shd_event_t,
+ gf_afr_mt_reply_t,
+ gf_afr_mt_subvol_healer_t,
+ gf_afr_mt_spbc_timeout_t,
+ gf_afr_mt_spb_status_t,
+ gf_afr_mt_empty_brick_t,
+ gf_afr_mt_child_latency_t,
+ gf_afr_mt_atomic_t,
+ gf_afr_mt_lk_heal_info_t,
+ gf_afr_mt_gf_lock,
+ gf_afr_mt_end
};
#endif
-
diff --git a/xlators/cluster/afr/src/afr-messages.h b/xlators/cluster/afr/src/afr-messages.h
index 37736f5afa3..e73fd997765 100644
--- a/xlators/cluster/afr/src/afr-messages.h
+++ b/xlators/cluster/afr/src/afr-messages.h
@@ -11,347 +11,157 @@
#ifndef _AFR_MESSAGES_H_
#define _AFR_MESSAGES_H_
-#include "glfs-message-id.h"
-
-/*! \file afr-messages.h
- * \brief AFR log-message IDs and their descriptions.
- */
-
-/* NOTE: Rules for message additions
- * 1) Each instance of a message is _better_ left with a unique message ID, even
- * if the message format is the same. Reasoning is that, if the message
- * format needs to change in one instance, the other instances are not
- * impacted or the new change does not change the ID of the instance being
- * modified.
- * 2) Addition of a message,
- * - Should increment the GLFS_NUM_MESSAGES
- * - Append to the list of messages defined, towards the end
- * - Retain macro naming as glfs_msg_X (for redability across developers)
- * NOTE: Rules for message format modifications
- * 3) Check acorss the code if the message ID macro in question is reused
- * anywhere. If reused then then the modifications should ensure correctness
- * everywhere, or needs a new message ID as (1) above was not adhered to. If
- * not used anywhere, proceed with the required modification.
- * NOTE: Rules for message deletion
- * 4) Check (3) and if used anywhere else, then cannot be deleted. If not used
- * anywhere, then can be deleted, but will leave a hole by design, as
- * addition rules specify modification to the end of the list and not filling
- * holes.
- */
-
-#define GLFS_COMP_BASE_AFR GLFS_MSGID_COMP_AFR
-#define GLFS_NUM_MESSAGES 40
-#define GLFS_MSGID_END (GLFS_COMP_BASE_AFR + GLFS_NUM_MESSAGES + 1)
-
-#define glfs_msg_start_x GLFS_COMP_BASE_AFR, "Invalid: Start of messages"
-
-/*!
- * @messageid 108001
- * @diagnosis Client quorum is not met due to which file modification
- * operations are disallowed.
- * @recommendedaction Some brick processes are down/ not visible from the
- * client. Ensure that the bricks are up/ network traffic is not blocked.
- */
-#define AFR_MSG_QUORUM_FAIL (GLFS_COMP_BASE_AFR + 1)
-
-
-/*!
- * @messageid 108002
- * @diagnosis The bricks that were down are now up and quorum is restored.
- * @recommendedaction Possibly check why the bricks went down to begin with.
- */
-#define AFR_MSG_QUORUM_MET (GLFS_COMP_BASE_AFR + 2)
-
-
-/*!
- * @messageid 108003
- * @diagnosis Client quorum-type was set to auto due to which the quorum-count
- * option is no longer valid.
- * @recommendedaction None.
- */
-#define AFR_MSG_QUORUM_OVERRIDE (GLFS_COMP_BASE_AFR + 3)
-
-
-/*!
- * @messageid 108004
- * @diagnosis Replication sub volume witnessed a connection notification
- * from a brick which does not belong to its replica set.
- * @recommendedaction None. This is a safety check in code.
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but do not delete it. This prevents the ID from being
+ * reused by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
*/
-#define AFR_MSG_INVALID_CHILD_UP (GLFS_COMP_BASE_AFR + 4)
-
-
-/*!
- * @messageid 108005
- * @diagnosis A replica set that was inaccessible because all its bricks were
- * down is now accessible because at least one of its bricks came back up.
- * @recommendedaction Possibly check why all the bricks of that replica set
- * went down to begin with.
- */
-#define AFR_MSG_SUBVOL_UP (GLFS_COMP_BASE_AFR + 5)
-
-
-/*!
- * @messageid 108006
- * @diagnosis All bricks of a replica set are down. Data residing in that
- * replica cannot be accessed until one of the bricks come back up.
- * @recommendedaction Ensure that the bricks are up.
- */
-#define AFR_MSG_ALL_SUBVOLS_DOWN (GLFS_COMP_BASE_AFR + 6)
-
-
-/*!
- * @messageid 108007
- * @diagnosis Entry unlocks failed on a brick.
- * @recommendedaction Error number in the log should give the reason why it
- * failed. Also observe brick logs for more information.
-*/
-#define AFR_MSG_ENTRY_UNLOCK_FAIL (GLFS_COMP_BASE_AFR + 7)
-
-
-/*!
- * @messageid 108008
- * @diagnosis There is an inconsistency in the file's data/metadata/gfid
- * amongst the bricks of a replica set.
- * @recommendedaction Resolve the split brain by clearing the AFR changelog
- * attributes from the appropriate brick and trigger self-heal.
- */
-#define AFR_MSG_SPLIT_BRAIN (GLFS_COMP_BASE_AFR + 8)
-
-
-/*!
- * @messageid 108009
- * @diagnosis open/opendir failed on a brick.
- * @recommendedaction Error number in the log should give the reason why it
- * failed. Also observe brick logs for more information.
- */
-#define AFR_MSG_OPEN_FAIL (GLFS_COMP_BASE_AFR + 9)
-
-
-/*!
- * @messageid 108010
- * @diagnosis Inode unlocks failed on a brick.
- * @recommendedaction Error number in the log should give the reason why it
- * failed. Also observe brick logs for more information.
-*/
-#define AFR_MSG_INODE_UNLOCK_FAIL (GLFS_COMP_BASE_AFR + 10)
-
-/*!
- * @messageid 108011
- * @diagnosis Setting of pending xattrs succeeded/failed during replace-brick
- * operation.
- * @recommendedaction In case of failure, error number in the log should give
- * the reason why it failed. Also observe brick logs for more information.
-*/
-#define AFR_MSG_REPLACE_BRICK_STATUS (GLFS_COMP_BASE_AFR + 11)
-
-/*!
- * @messageid 108012
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_GFID_NULL (GLFS_COMP_BASE_AFR + 12)
-
-/*!
- * @messageid 108013
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_FD_CREATE_FAILED (GLFS_COMP_BASE_AFR + 13)
-
-/*!
- * @messageid 108014
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_DICT_SET_FAILED (GLFS_COMP_BASE_AFR + 14)
-
-/*!
- * @messageid 108015
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_EXPUNGING_FILE_OR_DIR (GLFS_COMP_BASE_AFR + 15)
-
-/*!
- * @messageid 108016
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_MIGRATION_IN_PROGRESS (GLFS_COMP_BASE_AFR + 16)
-
-/*!
- * @messageid 108017
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_CHILD_MISCONFIGURED (GLFS_COMP_BASE_AFR + 17)
-
-/*!
- * @messageid 108018
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_VOL_MISCONFIGURED (GLFS_COMP_BASE_AFR + 18)
-
-/*!
- * @messageid 108019
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_BLOCKING_LKS_FAILED (GLFS_COMP_BASE_AFR + 19)
-
-/*!
- * @messageid 108020
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_INVALID_FD (GLFS_COMP_BASE_AFR + 20)
-
-/*!
- * @messageid 108021
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_LOCK_INFO (GLFS_COMP_BASE_AFR + 21)
-
-/*!
- * @messageid 108022
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_LOCK_XLATOR_NOT_LOADED (GLFS_COMP_BASE_AFR + 22)
-
-/*!
- * @messageid 108023
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_FD_CTX_GET_FAILED (GLFS_COMP_BASE_AFR + 23)
-
-/*!
- * @messageid 108024
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_INVALID_SUBVOL (GLFS_COMP_BASE_AFR + 24)
-
-/*!
- * @messageid 108025
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_PUMP_XLATOR_ERROR (GLFS_COMP_BASE_AFR + 25)
-
-/*!
- * @messageid 108026
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_SELF_HEAL_INFO (GLFS_COMP_BASE_AFR + 26)
-
-/*!
- * @messageid 108027
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_READ_SUBVOL_ERROR (GLFS_COMP_BASE_AFR + 27)
-
-/*!
- * @messageid 108028
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_DICT_GET_FAILED (GLFS_COMP_BASE_AFR + 28)
-
-
-/*!
- * @messageid 108029
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_INFO_COMMON (GLFS_COMP_BASE_AFR + 29)
-
-/*!
- * @messageid 108030
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR (GLFS_COMP_BASE_AFR + 30)
-
-/*!
- * @messageid 108031
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_LOCAL_CHILD (GLFS_COMP_BASE_AFR + 31)
-
-/*!
- * @messageid 108032
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_INVALID_DATA (GLFS_COMP_BASE_AFR + 32)
-
-/*!
- * @messageid 108033
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_INVALID_ARG (GLFS_COMP_BASE_AFR + 33)
-
-/*!
- * @messageid 108034
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_INDEX_DIR_GET_FAILED (GLFS_COMP_BASE_AFR + 34)
-
-/*!
- * @messageid 108035
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_FSYNC_FAILED (GLFS_COMP_BASE_AFR + 35)
-
-/*!
- * @messageid 108036
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_FAVORITE_CHILD (GLFS_COMP_BASE_AFR + 36)
-/*!
- * @messageid 108037
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_SELF_HEAL_FAILED (GLFS_COMP_BASE_AFR + 37)
-
-/*!
- * @messageid 108038
- * @diagnosis
- * @recommendedaction
-*/
-#define AFR_MSG_SPLIT_BRAIN_STATUS (GLFS_COMP_BASE_AFR + 38)
-
-/*!
- * @messageid 108039
- * @diagnosis Setting of pending xattrs succeeded/failed during add-brick
- * operation.
- * @recommendedaction In case of failure, error number in the log should give
- * the reason why it failed. Also observe brick logs for more information.
-*/
-#define AFR_MSG_ADD_BRICK_STATUS (GLFS_COMP_BASE_AFR + 39)
-
-
-/*!
- * @messageid 108040
- * @diagnosis AFR was unable to be loaded because the pending-changelog xattrs
- * were not found in the volfile.
- * @recommendedaction Please ensure cluster op-version is atleast 30707 and the
- * volfiles are regenerated.
-*/
-#define AFR_MSG_NO_CHANGELOG (GLFS_COMP_BASE_AFR + 40)
-#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
+GLFS_MSGID(
+ AFR, AFR_MSG_QUORUM_FAIL, AFR_MSG_QUORUM_MET, AFR_MSG_QUORUM_OVERRIDE,
+ AFR_MSG_INVALID_CHILD_UP, AFR_MSG_SUBVOL_UP, AFR_MSG_SUBVOLS_DOWN,
+ AFR_MSG_ENTRY_UNLOCK_FAIL, AFR_MSG_SPLIT_BRAIN, AFR_MSG_OPEN_FAIL,
+ AFR_MSG_UNLOCK_FAIL, AFR_MSG_REPLACE_BRICK_STATUS, AFR_MSG_GFID_NULL,
+ AFR_MSG_FD_CREATE_FAILED, AFR_MSG_DICT_SET_FAILED,
+ AFR_MSG_EXPUNGING_FILE_OR_DIR, AFR_MSG_MIGRATION_IN_PROGRESS,
+ AFR_MSG_CHILD_MISCONFIGURED, AFR_MSG_VOL_MISCONFIGURED,
+ AFR_MSG_INTERNAL_LKS_FAILED, AFR_MSG_INVALID_FD, AFR_MSG_LOCK_INFO,
+ AFR_MSG_LOCK_XLATOR_NOT_LOADED, AFR_MSG_FD_CTX_GET_FAILED,
+ AFR_MSG_INVALID_SUBVOL, AFR_MSG_PUMP_XLATOR_ERROR, AFR_MSG_SELF_HEAL_INFO,
+ AFR_MSG_READ_SUBVOL_ERROR, AFR_MSG_DICT_GET_FAILED, AFR_MSG_INFO_COMMON,
+ AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR, AFR_MSG_LOCAL_CHILD, AFR_MSG_INVALID_DATA,
+ AFR_MSG_INVALID_ARG, AFR_MSG_INDEX_DIR_GET_FAILED, AFR_MSG_FSYNC_FAILED,
+ AFR_MSG_FAVORITE_CHILD, AFR_MSG_SELF_HEAL_FAILED,
+ AFR_MSG_SPLIT_BRAIN_STATUS, AFR_MSG_ADD_BRICK_STATUS, AFR_MSG_NO_CHANGELOG,
+ AFR_MSG_TIMER_CREATE_FAIL, AFR_MSG_SBRAIN_FAV_CHILD_POLICY,
+ AFR_MSG_INODE_CTX_GET_FAILED, AFR_MSG_THIN_ARB,
+ AFR_MSG_THIN_ARB_XATTROP_FAILED, AFR_MSG_THIN_ARB_LOC_POP_FAILED,
+ AFR_MSG_GET_PEND_VAL, AFR_MSG_THIN_ARB_SKIP_SHD, AFR_MSG_UNKNOWN_SET,
+ AFR_MSG_NO_XL_ID, AFR_MSG_SELF_HEAL_INFO_START,
+ AFR_MSG_SELF_HEAL_INFO_FINISH, AFR_MSG_INCRE_COUNT,
+ AFR_MSG_ADD_TO_OUTPUT_FAILED, AFR_MSG_SET_TIME_FAILED,
+ AFR_MSG_GFID_MISMATCH_DETECTED, AFR_MSG_GFID_HEAL_MSG,
+ AFR_MSG_THIN_ARB_LOOKUP_FAILED, AFR_MSG_DICT_CREATE_FAILED,
+ AFR_MSG_NO_MAJORITY_TO_RESOLVE, AFR_MSG_TYPE_MISMATCH,
+ AFR_MSG_SIZE_POLICY_NOT_APPLICABLE, AFR_MSG_NO_CHILD_SELECTED,
+ AFR_MSG_INVALID_CHILD, AFR_MSG_RESOLVE_CONFLICTING_DATA,
+ SERROR_GETTING_SRC_BRICK, SNO_DIFF_IN_MTIME, SNO_BIGGER_FILE,
+ SALL_BRICKS_UP_TO_RESOLVE, AFR_MSG_UNLOCK_FAILED, AFR_MSG_POST_OP_FAILED,
+ AFR_MSG_TA_FRAME_CREATE_FAILED, AFR_MSG_SET_KEY_XATTROP_FAILED,
+ AFR_MSG_BLOCKING_ENTRYLKS_FAILED, AFR_MSG_FOP_FAILED,
+ AFR_MSG_CLEAN_UP_FAILED, AFR_MSG_UNABLE_TO_FETCH, AFR_MSG_XATTR_SET_FAILED,
+ AFR_MSG_SPLIT_BRAIN_REPLICA, AFR_MSG_INODE_CTX_FAILED,
+ AFR_MSG_LOOKUP_FAILED, AFR_MSG_ALL_SUBVOLS_DOWN,
+ AFR_MSG_RELEASE_LOCK_FAILED, AFR_MSG_CLEAR_TIME_SPLIT_BRAIN,
+ AFR_MSG_READ_FAILED, AFR_MSG_LAUNCH_FAILED, AFR_MSG_READ_SUBVOL_NOT_UP,
+ AFR_MSG_LK_HEAL_DOM, AFR_MSG_NEW_BRICK, AFR_MSG_SPLIT_BRAIN_SET_FAILED,
+ AFR_MSG_SPLIT_BRAIN_DETERMINE_FAILED, AFR_MSG_HEALER_SPAWN_FAILED,
+ AFR_MSG_ADD_CRAWL_EVENT_FAILED, AFR_MSG_NULL_DEREF, AFR_MSG_SET_PEND_XATTR,
+ AFR_MSG_INTERNAL_ATTR);
+
+#define AFR_MSG_DICT_GET_FAILED_STR "Dict get failed"
+#define AFR_MSG_DICT_SET_FAILED_STR "Dict set failed"
+#define AFR_MSG_HEALER_SPAWN_FAILED_STR "Healer spawn failed"
+#define AFR_MSG_ADD_CRAWL_EVENT_FAILED_STR "Adding crawl event failed"
+#define AFR_MSG_INVALID_ARG_STR "Invalid argument"
+#define AFR_MSG_INDEX_DIR_GET_FAILED_STR "unable to get index-dir on "
+#define AFR_MSG_THIN_ARB_LOOKUP_FAILED_STR "Failed lookup on file"
+#define AFR_MSG_DICT_CREATE_FAILED_STR "Failed to create dict."
+#define AFR_MSG_THIN_ARB_XATTROP_FAILED_STR "Xattrop failed."
+#define AFR_MSG_THIN_ARB_LOC_POP_FAILED_STR \
+ "Failed to populate loc for thin-arbiter"
+#define AFR_MSG_GET_PEND_VAL_STR "Error getting value of pending"
+#define AFR_MSG_THIN_ARB_SKIP_SHD_STR "I am not the god shd. skipping."
+#define AFR_MSG_UNKNOWN_SET_STR "Unknown set"
+#define AFR_MSG_NO_XL_ID_STR "xl does not have id"
+#define AFR_MSG_SELF_HEAL_INFO_START_STR "starting full sweep on"
+#define AFR_MSG_SELF_HEAL_INFO_FINISH_STR "finished full sweep on"
+#define AFR_MSG_INCRE_COUNT_STR "Could not increment the counter."
+#define AFR_MSG_ADD_TO_OUTPUT_FAILED_STR "Could not add to output"
+#define AFR_MSG_SET_TIME_FAILED_STR "Could not set time"
+#define AFR_MSG_GFID_HEAL_MSG_STR "Error setting gfid-heal-msg dict"
+#define AFR_MSG_NO_MAJORITY_TO_RESOLVE_STR \
+ "No majority to resolve gfid split brain"
+#define AFR_MSG_GFID_MISMATCH_DETECTED_STR "Gfid mismatch detected"
+#define AFR_MSG_SELF_HEAL_INFO_STR "performing selfheal"
+#define AFR_MSG_TYPE_MISMATCH_STR "TYPE mismatch"
+#define AFR_MSG_SIZE_POLICY_NOT_APPLICABLE_STR \
+ "Size policy is not applicable to directories."
+#define AFR_MSG_NO_CHILD_SELECTED_STR \
+ "No child selected by favorite-child policy"
+#define AFR_MSG_INVALID_CHILD_STR "Invalid child"
+#define AFR_MSG_RESOLVE_CONFLICTING_DATA_STR \
+ "selected as authentic to resolve conflicting data"
+#define SERROR_GETTING_SRC_BRICK_STR "Error getting the source brick"
+#define SNO_DIFF_IN_MTIME_STR "No difference in mtime"
+#define SNO_BIGGER_FILE_STR "No bigger file"
+#define SALL_BRICKS_UP_TO_RESOLVE_STR \
+ "All the bricks should be up to resolve the gfid split brain"
+#define AFR_MSG_UNLOCK_FAILED_STR "Failed to unlock"
+#define AFR_MSG_POST_OP_FAILED_STR "Post-op on thin-arbiter failed"
+#define AFR_MSG_TA_FRAME_CREATE_FAILED_STR "Failed to create ta_frame"
+#define AFR_MSG_SET_KEY_XATTROP_FAILED_STR "Could not set key during xattrop"
+#define AFR_MSG_BLOCKING_ENTRYLKS_FAILED_STR "Blocking entrylks failed"
+#define AFR_MSG_FSYNC_FAILED_STR "fsync failed"
+#define AFR_MSG_QUORUM_FAIL_STR "quorum is not met"
+#define AFR_MSG_FOP_FAILED_STR "Failing Fop"
+#define AFR_MSG_INVALID_SUBVOL_STR "not a subvolume"
+#define AFR_MSG_VOL_MISCONFIGURED_STR "Volume is dangling"
+#define AFR_MSG_CHILD_MISCONFIGURED_STR \
+ "replicate translator needs more than one subvolume defined"
+#define AFR_MSG_CLEAN_UP_FAILED_STR "Failed to clean up healer threads"
+#define AFR_MSG_QUORUM_OVERRIDE_STR "overriding quorum-count"
+#define AFR_MSG_UNABLE_TO_FETCH_STR \
+ "Unable to fetch afr-pending-xattr option from volfile. Falling back to " \
+ "using client translator names"
+#define AFR_MSG_NULL_DEREF_STR "possible NULL deref"
+#define AFR_MSG_XATTR_SET_FAILED_STR "Cannot set xattr cookie key"
+#define AFR_MSG_SPLIT_BRAIN_STATUS_STR "Failed to create synctask"
+#define AFR_MSG_SUBVOLS_DOWN_STR "All subvolumes are not up"
+#define AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR_STR \
+ "Failed to cancel split-brain choice"
+#define AFR_MSG_SPLIT_BRAIN_REPLICA_STR \
+ "Cannot set replica. File is not in data/metadata split-brain"
+#define AFR_MSG_INODE_CTX_FAILED_STR "Failed to get inode_ctx"
+#define AFR_MSG_READ_SUBVOL_ERROR_STR "no read subvols"
+#define AFR_MSG_LOCAL_CHILD_STR "selecting local read-child"
+#define AFR_MSG_LOOKUP_FAILED_STR "Failed to lookup/create thin-arbiter id file"
+#define AFR_MSG_TIMER_CREATE_FAIL_STR \
+ "Cannot create timer for delayed initialization"
+#define AFR_MSG_SUBVOL_UP_STR "Subvolume came back up; going online"
+#define AFR_MSG_ALL_SUBVOLS_DOWN_STR \
+ "All subvolumes are down. Going offline until atleast one of them is up"
+#define AFR_MSG_RELEASE_LOCK_FAILED_STR "Failed to release lock"
+#define AFR_MSG_INVALID_CHILD_UP_STR "Received child_up from invalid subvolume"
+#define AFR_MSG_QUORUM_MET_STR "Client-quorum is met"
+#define AFR_MSG_EXPUNGING_FILE_OR_DIR_STR "expunging file or dir"
+#define AFR_MSG_SELF_HEAL_FAILED_STR "Invalid"
+#define AFR_MSG_SPLIT_BRAIN_STR "Skipping conservative merge on the file"
+#define AFR_MSG_CLEAR_TIME_SPLIT_BRAIN_STR "clear time split brain"
+#define AFR_MSG_READ_FAILED_STR "Failing read since good brick is down"
+#define AFR_MSG_LAUNCH_FAILED_STR "Failed to launch synctask"
+#define AFR_MSG_READ_SUBVOL_NOT_UP_STR \
+ "read subvolume in this generation is not up"
+#define AFR_MSG_INTERNAL_LKS_FAILED_STR \
+ "Unable to work with lk-owner while attempting fop"
+#define AFR_MSG_LOCK_XLATOR_NOT_LOADED_STR \
+ "subvolume does not support locking. please load features/locks xlator " \
+ "on server."
+#define AFR_MSG_FD_CTX_GET_FAILED_STR "unable to get fd ctx"
+#define AFR_MSG_INFO_COMMON_STR "fd not open on any subvolumes, aborting."
+#define AFR_MSG_REPLACE_BRICK_STATUS_STR "Couldn't acquire lock on any child."
+#define AFR_MSG_NEW_BRICK_STR "New brick"
+#define AFR_MSG_SPLIT_BRAIN_SET_FAILED_STR \
+ "Failed to set split-brain choice to -1"
+#define AFR_MSG_SPLIT_BRAIN_DETERMINE_FAILED_STR \
+ "Failed to determine split-brain. Aborting split-brain-choice set"
+#define AFR_MSG_OPEN_FAIL_STR "Failed to open subvolume"
+#define AFR_MSG_SET_PEND_XATTR_STR "Set of pending xattr"
+#define AFR_MSG_INTERNAL_ATTR_STR "is an internal extended attribute"
#endif /* !_AFR_MESSAGES_H_ */
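The rewritten header above replaces the hand-numbered @messageid blocks with a single append-only GLFS_MSGID() list plus per-ID *_STR strings, and the comment at the top of the hunk states the rule: entries may only be appended, never removed. The reason is that IDs are assigned consecutively from the component base (108000 for AFR, per the deleted @messageid annotations), so dropping an entry would silently renumber everything after it. A minimal self-contained sketch of that numbering behaviour follows; the enum and macro names are invented for illustration and are not the GLFS_MSGID() implementation.
```
/* Self-contained sketch, not GlusterFS code: why the ID list is append-only.
 * GLFS_MSGID() hands out consecutive values starting from the component
 * base, much like this enum; deleting an entry would shift every later ID,
 * so retired messages are renamed or left in place instead. */
#include <stdio.h>

#define ILLUSTRATIVE_AFR_BASE 108000 /* assumed base, for illustration only */

enum illustrative_afr_msg_ids {
    MSG_QUORUM_FAIL = ILLUSTRATIVE_AFR_BASE + 1, /* 108001 */
    MSG_QUORUM_MET,                              /* 108002 */
    MSG_QUORUM_OVERRIDE,                         /* 108003 */
    /* New IDs are appended here; nothing above is ever removed. */
};

int
main(void)
{
    printf("MSG_QUORUM_OVERRIDE = %d\n", MSG_QUORUM_OVERRIDE);
    return 0;
}
```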
diff --git a/xlators/cluster/afr/src/afr-open.c b/xlators/cluster/afr/src/afr-open.c
index 059d3f9bd71..64856042b65 100644
--- a/xlators/cluster/afr/src/afr-open.c
+++ b/xlators/cluster/afr/src/afr-open.c
@@ -8,322 +8,346 @@
cases as published by the Free Software Foundation.
*/
-#include <libgen.h>
#include <unistd.h>
-#include <fnmatch.h>
#include <sys/time.h>
#include <stdlib.h>
#include <signal.h>
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "afr.h"
-#include "dict.h"
-#include "xlator.h"
-#include "hashfn.h"
-#include "logging.h"
-#include "stack.h"
-#include "list.h"
-#include "call-stub.h"
-#include "defaults.h"
-#include "common-utils.h"
-#include "compat-errno.h"
-#include "compat.h"
-#include "byte-order.h"
-#include "statedump.h"
-
-#include "fd.h"
-
-#include "afr-inode-read.h"
-#include "afr-inode-write.h"
-#include "afr-dir-read.h"
-#include "afr-dir-write.h"
-#include "afr-transaction.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/statedump.h>
+#include "afr-transaction.h"
gf_boolean_t
-afr_is_fd_fixable (fd_t *fd)
+afr_is_fd_fixable(fd_t *fd)
{
- if (!fd || !fd->inode)
- return _gf_false;
- else if (fd_is_anonymous (fd))
- return _gf_false;
- else if (gf_uuid_is_null (fd->inode->gfid))
- return _gf_false;
-
- return _gf_true;
+ if (!fd || !fd->inode)
+ return _gf_false;
+ else if (fd_is_anonymous(fd))
+ return _gf_false;
+ else if (gf_uuid_is_null(fd->inode->gfid))
+ return _gf_false;
+
+ return _gf_true;
}
-
int
-afr_open_ftruncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf, dict_t *xdata)
+afr_open_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- afr_local_t * local = frame->local;
+ afr_local_t *local = frame->local;
- AFR_STACK_UNWIND (open, frame, local->op_ret, local->op_errno,
- local->fd, xdata);
- return 0;
+ AFR_STACK_UNWIND(open, frame, local->op_ret, local->op_errno,
+ local->cont.open.fd, xdata);
+ return 0;
}
-
int
-afr_open_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- fd_t *fd, dict_t *xdata)
+afr_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, fd_t *fd, dict_t *xdata)
{
- afr_local_t * local = NULL;
- int call_count = -1;
- int child_index = (long) cookie;
- afr_fd_ctx_t *fd_ctx = NULL;
-
- local = frame->local;
- fd_ctx = local->fd_ctx;
-
- LOCK (&frame->lock);
- {
- if (op_ret == -1) {
- local->op_errno = op_errno;
- fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
- } else {
- local->op_ret = op_ret;
- fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
- if (!local->xdata_rsp && xdata)
- local->xdata_rsp = dict_ref (xdata);
- }
+ afr_local_t *local = NULL;
+ int call_count = -1;
+ int child_index = (long)cookie;
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ local = frame->local;
+ fd_ctx = local->fd_ctx;
+
+ local->replies[child_index].valid = 1;
+ local->replies[child_index].op_ret = op_ret;
+ local->replies[child_index].op_errno = op_errno;
+
+ LOCK(&frame->lock);
+ {
+ if (op_ret == -1) {
+ local->op_errno = op_errno;
+ fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
+ } else {
+ local->op_ret = op_ret;
+ fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
+ if (!local->xdata_rsp && xdata)
+ local->xdata_rsp = dict_ref(xdata);
}
- UNLOCK (&frame->lock);
-
- call_count = afr_frame_return (frame);
-
- if (call_count == 0) {
- if ((fd_ctx->flags & O_TRUNC) && (local->op_ret >= 0)) {
- STACK_WIND (frame, afr_open_ftruncate_cbk,
- this, this->fops->ftruncate,
- fd, 0, NULL);
- } else {
- AFR_STACK_UNWIND (open, frame, local->op_ret,
- local->op_errno, local->fd,
- local->xdata_rsp);
- }
+ call_count = --local->call_count;
+ }
+ UNLOCK(&frame->lock);
+
+ if (call_count == 0) {
+ afr_handle_replies_quorum(frame, this);
+ if (local->op_ret == -1) {
+ AFR_STACK_UNWIND(open, frame, local->op_ret, local->op_errno, NULL,
+ NULL);
+ } else if (fd_ctx->flags & O_TRUNC) {
+ STACK_WIND(frame, afr_open_ftruncate_cbk, this,
+ this->fops->ftruncate, fd, 0, NULL);
+ } else {
+ AFR_STACK_UNWIND(open, frame, local->op_ret, local->op_errno,
+ local->cont.open.fd, local->xdata_rsp);
}
+ }
- return 0;
+ return 0;
}
int
-afr_open (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
- fd_t *fd, dict_t *xdata)
+afr_open_continue(call_frame_t *frame, xlator_t *this, int err)
{
- afr_private_t * priv = NULL;
- afr_local_t * local = NULL;
- int i = 0;
- int32_t call_count = 0;
- int32_t op_errno = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
-
- //We can't let truncation to happen outside transaction.
-
- priv = this->private;
-
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx) {
- op_errno = ENOMEM;
- goto out;
- }
-
- local->fd = fd_ref (fd);
- local->fd_ctx = fd_ctx;
- fd_ctx->flags = flags;
-
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = 0;
+ int i = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (err) {
+ AFR_STACK_UNWIND(open, frame, -1, err, NULL, NULL);
+ } else {
+ local->call_count = AFR_COUNT(local->child_up, priv->child_count);
call_count = local->call_count;
- local->cont.open.flags = flags;
-
for (i = 0; i < priv->child_count; i++) {
- if (local->child_up[i]) {
- STACK_WIND_COOKIE (frame, afr_open_cbk, (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->open,
- loc, (flags & ~O_TRUNC), fd, xdata);
- if (!--call_count)
- break;
- }
+ if (local->child_up[i]) {
+ STACK_WIND_COOKIE(frame, afr_open_cbk, (void *)(long)i,
+ priv->children[i],
+ priv->children[i]->fops->open, &local->loc,
+ (local->cont.open.flags & ~O_TRUNC),
+ local->cont.open.fd, local->xdata_req);
+ if (!--call_count)
+ break;
+ }
}
+ }
+ return 0;
+}
- return 0;
+int
+afr_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int spb_subvol = 0;
+ int event_generation = 0;
+ int ret = 0;
+ int32_t op_errno = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
+
+ // We can't let truncation happen outside the transaction.
+
+ priv = this->private;
+
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
+
+ local->op = GF_FOP_OPEN;
+ fd_ctx = afr_fd_ctx_get(fd, this);
+ if (!fd_ctx) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ if (priv->quorum_count && !afr_has_quorum(local->child_up, this, NULL)) {
+ op_errno = afr_quorum_errno(priv);
+ goto out;
+ }
+
+ if (!afr_is_consistent_io_possible(local, priv, &op_errno))
+ goto out;
+
+ local->inode = inode_ref(loc->inode);
+ loc_copy(&local->loc, loc);
+ local->fd_ctx = fd_ctx;
+ fd_ctx->flags = flags;
+ if (xdata)
+ local->xdata_req = dict_ref(xdata);
+
+ local->cont.open.flags = flags;
+ local->cont.open.fd = fd_ref(fd);
+
+ ret = afr_inode_get_readable(frame, local->inode, this, NULL,
+ &event_generation, AFR_DATA_TRANSACTION);
+ if ((ret < 0) &&
+ (afr_split_brain_read_subvol_get(local->inode, this, NULL,
+ &spb_subvol) == 0) &&
+ spb_subvol < 0) {
+ afr_inode_refresh(frame, this, local->inode, local->inode->gfid,
+ afr_open_continue);
+ } else {
+ afr_open_continue(frame, this, 0);
+ }
+
+ return 0;
out:
- AFR_STACK_UNWIND (open, frame, -1, op_errno, fd, NULL);
+ AFR_STACK_UNWIND(open, frame, -1, op_errno, fd, NULL);
- return 0;
+ return 0;
}
int
-afr_openfd_fix_open_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd,
- dict_t *xdata)
+afr_openfd_fix_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd,
+ dict_t *xdata)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
- int call_count = 0;
- int child_index = (long) cookie;
-
- priv = this->private;
- local = frame->local;
-
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ int call_count = 0;
+ int child_index = (long)cookie;
+
+ priv = this->private;
+ local = frame->local;
+
+ if (op_ret >= 0) {
+ gf_msg_debug(this->name, 0,
+ "fd for %s opened "
+ "successfully on subvolume %s",
+ local->loc.path, priv->children[child_index]->name);
+ } else {
+ gf_smsg(this->name, fop_log_level(GF_FOP_OPEN, op_errno), op_errno,
+ AFR_MSG_OPEN_FAIL, "path=%s", local->loc.path, "subvolume=%s",
+ priv->children[child_index]->name, NULL);
+ }
+
+ fd_ctx = local->fd_ctx;
+
+ LOCK(&local->fd->lock);
+ {
if (op_ret >= 0) {
- gf_msg_debug (this->name, 0, "fd for %s opened "
- "successfully on subvolume %s", local->loc.path,
- priv->children[child_index]->name);
+ fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
} else {
- gf_msg (this->name, fop_log_level (GF_FOP_OPEN, op_errno),
- op_errno, AFR_MSG_OPEN_FAIL, "Failed to open %s on "
- "subvolume %s", local->loc.path,
- priv->children[child_index]->name);
+ fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
}
+ }
+ UNLOCK(&local->fd->lock);
- fd_ctx = local->fd_ctx;
+ call_count = afr_frame_return(frame);
+ if (call_count == 0)
+ AFR_STACK_DESTROY(frame);
- LOCK (&local->fd->lock);
- {
- if (op_ret >= 0) {
- fd_ctx->opened_on[child_index] = AFR_FD_OPENED;
- } else {
- fd_ctx->opened_on[child_index] = AFR_FD_NOT_OPENED;
- }
- }
- UNLOCK (&local->fd->lock);
+ return 0;
+}
- call_count = afr_frame_return (frame);
- if (call_count == 0)
- AFR_STACK_DESTROY (frame);
+static int
+afr_fd_ctx_need_open(fd_t *fd, xlator_t *this, unsigned char *need_open)
+{
+ afr_fd_ctx_t *fd_ctx = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int count = 0;
+
+ priv = this->private;
+ fd_ctx = afr_fd_ctx_get(fd, this);
+ if (!fd_ctx)
return 0;
-}
+ LOCK(&fd->lock);
+ {
+ for (i = 0; i < priv->child_count; i++) {
+ if (fd_ctx->opened_on[i] == AFR_FD_NOT_OPENED &&
+ priv->child_up[i]) {
+ fd_ctx->opened_on[i] = AFR_FD_OPENING;
+ need_open[i] = 1;
+ count++;
+ } else {
+ need_open[i] = 0;
+ }
+ }
+ }
+ UNLOCK(&fd->lock);
-static int
-afr_fd_ctx_need_open (fd_t *fd, xlator_t *this, unsigned char *need_open)
-{
- afr_fd_ctx_t *fd_ctx = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- int count = 0;
-
- priv = this->private;
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- return 0;
-
- LOCK (&fd->lock);
- {
- for (i = 0; i < priv->child_count; i++) {
- if (fd_ctx->opened_on[i] == AFR_FD_NOT_OPENED &&
- priv->child_up[i]) {
- fd_ctx->opened_on[i] = AFR_FD_OPENING;
- need_open[i] = 1;
- count++;
- } else {
- need_open[i] = 0;
- }
- }
- }
- UNLOCK (&fd->lock);
-
- return count;
+ return count;
}
-
void
-afr_fix_open (fd_t *fd, xlator_t *this)
+afr_fix_open(fd_t *fd, xlator_t *this)
{
- afr_private_t *priv = NULL;
- int i = 0;
- call_frame_t *frame = NULL;
- afr_local_t *local = NULL;
- int ret = -1;
- int32_t op_errno = 0;
- afr_fd_ctx_t *fd_ctx = NULL;
- unsigned char *need_open = NULL;
- int call_count = 0;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ call_frame_t *frame = NULL;
+ afr_local_t *local = NULL;
+ int ret = -1;
+ int32_t op_errno = 0;
+ afr_fd_ctx_t *fd_ctx = NULL;
+ unsigned char *need_open = NULL;
+ int call_count = 0;
- priv = this->private;
+ priv = this->private;
- if (!afr_is_fd_fixable (fd))
- goto out;
+ if (!afr_is_fd_fixable(fd))
+ goto out;
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- goto out;
+ fd_ctx = afr_fd_ctx_get(fd, this);
+ if (!fd_ctx)
+ goto out;
- need_open = alloca0 (priv->child_count);
+ need_open = alloca0(priv->child_count);
- call_count = afr_fd_ctx_need_open (fd, this, need_open);
- if (!call_count)
- goto out;
+ call_count = afr_fd_ctx_need_open(fd, this, need_open);
+ if (!call_count)
+ goto out;
- frame = create_frame (this, this->ctx->pool);
- if (!frame)
- goto out;
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame)
+ goto out;
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
+ local = AFR_FRAME_INIT(frame, op_errno);
+ if (!local)
+ goto out;
- local->loc.inode = inode_ref (fd->inode);
- ret = loc_path (&local->loc, NULL);
- if (ret < 0)
- goto out;
+ local->loc.inode = inode_ref(fd->inode);
+ ret = loc_path(&local->loc, NULL);
+ if (ret < 0)
+ goto out;
- local->fd = fd_ref (fd);
- local->fd_ctx = fd_ctx;
+ local->fd = fd_ref(fd);
+ local->fd_ctx = fd_ctx;
- local->call_count = call_count;
+ local->call_count = call_count;
- gf_msg_debug (this->name, 0, "need open count: %d",
- call_count);
+ gf_msg_debug(this->name, 0, "need open count: %d", call_count);
- for (i = 0; i < priv->child_count; i++) {
- if (!need_open[i])
- continue;
-
- if (IA_IFDIR == fd->inode->ia_type) {
- gf_msg_debug (this->name, 0,
- "opening fd for dir %s on subvolume %s",
- local->loc.path, priv->children[i]->name);
-
- STACK_WIND_COOKIE (frame, afr_openfd_fix_open_cbk,
- (void*) (long) i,
- priv->children[i],
- priv->children[i]->fops->opendir,
- &local->loc, local->fd,
- NULL);
- } else {
- gf_msg_debug (this->name, 0,
- "opening fd for file %s on subvolume %s",
- local->loc.path, priv->children[i]->name);
-
- STACK_WIND_COOKIE (frame, afr_openfd_fix_open_cbk,
- (void *)(long) i,
- priv->children[i],
- priv->children[i]->fops->open,
- &local->loc,
- fd_ctx->flags & (~O_TRUNC),
- local->fd, NULL);
- }
-
- if (!--call_count)
- break;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!need_open[i])
+ continue;
+
+ if (IA_IFDIR == fd->inode->ia_type) {
+ gf_msg_debug(this->name, 0, "opening fd for dir %s on subvolume %s",
+ local->loc.path, priv->children[i]->name);
+
+ STACK_WIND_COOKIE(frame, afr_openfd_fix_open_cbk, (void *)(long)i,
+ priv->children[i],
+ priv->children[i]->fops->opendir, &local->loc,
+ local->fd, NULL);
+ } else {
+ gf_msg_debug(this->name, 0,
+ "opening fd for file %s on subvolume %s",
+ local->loc.path, priv->children[i]->name);
+
+ STACK_WIND_COOKIE(frame, afr_openfd_fix_open_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->open,
+ &local->loc, fd_ctx->flags & (~O_TRUNC),
+ local->fd, NULL);
}
- return;
+ if (!--call_count)
+ break;
+ }
+
+ return;
out:
- if (frame)
- AFR_STACK_DESTROY (frame);
+ if (frame)
+ AFR_STACK_DESTROY(frame);
}
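In the afr-open.c hunk above, afr_fd_ctx_need_open() decides under fd->lock which children still need an open and flips those slots from AFR_FD_NOT_OPENED to AFR_FD_OPENING before any open is wound, so concurrent afr_fix_open() callers cannot race to reopen the same fd on the same brick. The short self-contained sketch below shows only that claim-under-lock pattern; the names are invented and it is not AFR code.
```
/* Self-contained sketch (invented names, not AFR code) of the
 * claim-under-lock pattern used by afr_fd_ctx_need_open(): entries are
 * flipped from NOT_OPENED to OPENING while the lock is held, so only one
 * caller wins the right to issue the open for a given child. */
#include <pthread.h>
#include <stdio.h>

enum fd_state { FD_NOT_OPENED, FD_OPENING, FD_OPENED };

#define CHILDREN 3

static pthread_mutex_t fd_lock = PTHREAD_MUTEX_INITIALIZER;
static enum fd_state opened_on[CHILDREN] = {FD_OPENED, FD_NOT_OPENED,
                                            FD_NOT_OPENED};
static int child_up[CHILDREN] = {1, 1, 0};

static int
need_open(int *need)
{
    int count = 0;

    pthread_mutex_lock(&fd_lock);
    for (int i = 0; i < CHILDREN; i++) {
        if (opened_on[i] == FD_NOT_OPENED && child_up[i]) {
            opened_on[i] = FD_OPENING; /* claim it before unlocking */
            need[i] = 1;
            count++;
        } else {
            need[i] = 0;
        }
    }
    pthread_mutex_unlock(&fd_lock);
    return count;
}

int
main(void)
{
    int need[CHILDREN];

    printf("children needing open: %d\n", need_open(need));
    return 0;
}
```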
diff --git a/xlators/cluster/afr/src/afr-read-txn.c b/xlators/cluster/afr/src/afr-read-txn.c
index a70565c37a1..6fc2c75145c 100644
--- a/xlators/cluster/afr/src/afr-read-txn.c
+++ b/xlators/cluster/afr/src/afr-read-txn.c
@@ -12,125 +12,328 @@
#include "afr-transaction.h"
#include "afr-messages.h"
-int
-afr_read_txn_next_subvol (call_frame_t *frame, xlator_t *this)
+void
+afr_pending_read_increment(afr_private_t *priv, int child_index)
+{
+ if (child_index < 0 || child_index > priv->child_count)
+ return;
+
+ GF_ATOMIC_INC(priv->pending_reads[child_index]);
+}
+
+void
+afr_pending_read_decrement(afr_private_t *priv, int child_index)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- int subvol = -1;
-
- local = frame->local;
- priv = this->private;
-
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->readable[i]) {
- /* don't even bother trying here.
- just mark as attempted and move on. */
- local->read_attempted[i] = 1;
- continue;
- }
-
- if (!local->read_attempted[i]) {
- subvol = i;
- break;
- }
- }
-
- /* If no more subvols were available for reading, we leave
- @subvol as -1, which is an indication we have run out of
- readable subvols. */
- if (subvol != -1)
- local->read_attempted[subvol] = 1;
- local->readfn (frame, this, subvol);
-
- return 0;
+ if (child_index < 0 || child_index > priv->child_count)
+ return;
+
+ GF_ATOMIC_DEC(priv->pending_reads[child_index]);
}
-#define AFR_READ_TXN_SET_ERROR_AND_GOTO(ret, errnum, index, label) \
- do { \
- local->op_ret = ret; \
- local->op_errno = errnum; \
- read_subvol = index; \
- gf_msg (this->name, GF_LOG_ERROR, EIO, AFR_MSG_SPLIT_BRAIN,\
- "Failing %s on gfid %s: split-brain observed.",\
- gf_fop_list[local->op], uuid_utoa (inode->gfid));\
- goto label; \
- } while (0)
+void
+afr_read_txn_wind(call_frame_t *frame, xlator_t *this, int subvol)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ afr_pending_read_decrement(priv, local->read_subvol);
+ local->read_subvol = subvol;
+ afr_pending_read_increment(priv, subvol);
+ local->readfn(frame, this, subvol);
+}
int
-afr_read_txn_refresh_done (call_frame_t *frame, xlator_t *this, int err)
+afr_read_txn_next_subvol(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- int read_subvol = 0;
- int event_generation = 0;
- inode_t *inode = NULL;
- int ret = -1;
- int spb_choice = -1;
-
- local = frame->local;
- inode = local->inode;
-
- if (err) {
- local->op_errno = -err;
- local->op_ret = -1;
- read_subvol = -1;
- goto readfn;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int subvol = -1;
+
+ local = frame->local;
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->readable[i]) {
+ /* don't even bother trying here.
+ just mark as attempted and move on. */
+ local->read_attempted[i] = 1;
+ continue;
}
- ret = afr_inode_get_readable (frame, inode, this, local->readable,
- &event_generation,
- local->transaction.type);
+ if (!local->read_attempted[i]) {
+ subvol = i;
+ break;
+ }
+ }
- if (ret == -1 || !event_generation)
- /* Even after refresh, we don't have a good
- read subvolume. Time to bail */
- AFR_READ_TXN_SET_ERROR_AND_GOTO (-1, EIO, -1, readfn);
+ /* If no more subvols were available for reading, we leave
+ @subvol as -1, which is an indication we have run out of
+ readable subvols. */
+ if (subvol != -1)
+ local->read_attempted[subvol] = 1;
+ afr_read_txn_wind(frame, this, subvol);
- read_subvol = afr_read_subvol_select_by_policy (inode, this,
- local->readable, NULL);
- if (read_subvol == -1)
- AFR_READ_TXN_SET_ERROR_AND_GOTO (-1, EIO, -1, readfn);
+ return 0;
+}
- if (local->read_attempted[read_subvol]) {
- afr_read_txn_next_subvol (frame, this);
- return 0;
- }
+static int
+afr_ta_read_txn_done(int ret, call_frame_t *ta_frame, void *opaque)
+{
+ STACK_DESTROY(ta_frame->root);
+ return 0;
+}
- local->read_attempted[read_subvol] = 1;
-readfn:
- if (read_subvol == -1) {
- ret = afr_inode_split_brain_choice_get (inode, this,
- &spb_choice);
- if ((ret == 0) && spb_choice >= 0)
- read_subvol = spb_choice;
- }
- local->readfn (frame, this, read_subvol);
+static int
+afr_ta_read_txn(void *opaque)
+{
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ int read_subvol = -1;
+ int query_child = AFR_CHILD_UNKNOWN;
+ int possible_bad_child = AFR_CHILD_UNKNOWN;
+ int ret = 0;
+ int op_errno = ENOMEM;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ struct gf_flock flock = {
+ 0,
+ };
+ dict_t *xdata_req = NULL;
+ dict_t *xdata_rsp = NULL;
+ int **pending = NULL;
+ loc_t loc = {
+ 0,
+ };
+
+ frame = (call_frame_t *)opaque;
+ this = frame->this;
+ local = frame->local;
+ priv = this->private;
+ query_child = local->read_txn_query_child;
+
+ if (query_child == AFR_CHILD_ZERO) {
+ possible_bad_child = AFR_CHILD_ONE;
+ } else if (query_child == AFR_CHILD_ONE) {
+ possible_bad_child = AFR_CHILD_ZERO;
+ } else {
+ /*read_txn_query_child is AFR_CHILD_UNKNOWN*/
+ goto out;
+ }
+
+ /* Ask the query_child to see if it blames the possibly bad one. */
+ xdata_req = dict_new();
+ if (!xdata_req)
+ goto out;
+
+ pending = afr_matrix_create(priv->child_count, AFR_NUM_CHANGE_LOGS);
+ if (!pending)
+ goto out;
+
+ ret = afr_set_pending_dict(priv, xdata_req, pending);
+ if (ret < 0)
+ goto out;
+
+ if (local->fd) {
+ ret = syncop_fxattrop(priv->children[query_child], local->fd,
+ GF_XATTROP_ADD_ARRAY, xdata_req, NULL, &xdata_rsp,
+ NULL);
+ } else {
+ ret = syncop_xattrop(priv->children[query_child], &local->loc,
+ GF_XATTROP_ADD_ARRAY, xdata_req, NULL, &xdata_rsp,
+ NULL);
+ }
+ if (ret || !xdata_rsp) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed xattrop for gfid %s on %s",
+ uuid_utoa(local->inode->gfid),
+ priv->children[query_child]->name);
+ op_errno = -ret;
+ goto out;
+ }
+
+ if (afr_ta_dict_contains_pending_xattr(xdata_rsp, priv,
+ possible_bad_child)) {
+ read_subvol = query_child;
+ goto out;
+ }
+ dict_unref(xdata_rsp);
+ xdata_rsp = NULL;
+
+ /* It doesn't. So query thin-arbiter to see if it blames any data brick. */
+ ret = afr_fill_ta_loc(this, &loc, _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to populate thin-arbiter loc for: %s.", loc.name);
+ goto out;
+ }
+ flock.l_type = F_WRLCK; /*start and length are already zero. */
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_MODIFY, &loc, F_SETLKW, &flock, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "gfid:%s: Failed to get AFR_TA_DOM_MODIFY lock on %s.",
+ uuid_utoa(local->inode->gfid),
+ priv->pending_key[THIN_ARBITER_BRICK_INDEX]);
+ op_errno = -ret;
+ goto out;
+ }
+
+ ret = syncop_xattrop(priv->children[THIN_ARBITER_BRICK_INDEX], &loc,
+ GF_XATTROP_ADD_ARRAY, xdata_req, NULL, &xdata_rsp,
+ NULL);
+ if (ret || !xdata_rsp) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "gfid:%s: Failed xattrop on %s.", uuid_utoa(local->inode->gfid),
+ priv->pending_key[THIN_ARBITER_BRICK_INDEX]);
+ op_errno = -ret;
+ goto unlock;
+ }
+
+ if (!afr_ta_dict_contains_pending_xattr(xdata_rsp, priv, query_child)) {
+ read_subvol = query_child;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, EIO, AFR_MSG_THIN_ARB,
+ "Failing read for gfid %s since good brick %s is down",
+ uuid_utoa(local->inode->gfid),
+ priv->children[possible_bad_child]->name);
+ op_errno = EIO;
+ }
+
+unlock:
+ flock.l_type = F_UNLCK;
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_MODIFY, &loc, F_SETLK, &flock, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "gfid:%s: Failed to unlock AFR_TA_DOM_MODIFY lock on "
+ "%s.",
+ uuid_utoa(local->inode->gfid),
+ priv->pending_key[THIN_ARBITER_BRICK_INDEX]);
+ }
+out:
+ if (xdata_req)
+ dict_unref(xdata_req);
+ if (xdata_rsp)
+ dict_unref(xdata_rsp);
+ if (pending)
+ afr_matrix_cleanup(pending, priv->child_count);
+ loc_wipe(&loc);
+
+ if (read_subvol == -1) {
+ local->op_ret = -1;
+ local->op_errno = op_errno;
+ }
+ afr_read_txn_wind(frame, this, read_subvol);
+ return ret;
+}
- return 0;
+void
+afr_ta_read_txn_synctask(call_frame_t *frame, xlator_t *this)
+{
+ call_frame_t *ta_frame = NULL;
+ afr_local_t *local = NULL;
+ int ret = 0;
+
+ local = frame->local;
+ ta_frame = afr_ta_frame_create(this);
+ if (!ta_frame) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to create ta_frame");
+ goto out;
+ }
+ ret = synctask_new(this->ctx->env, afr_ta_read_txn, afr_ta_read_txn_done,
+ ta_frame, frame);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to launch "
+ "afr_ta_read_txn synctask for gfid %s.",
+ uuid_utoa(local->inode->gfid));
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ STACK_DESTROY(ta_frame->root);
+ goto out;
+ }
+ return;
+out:
+ afr_read_txn_wind(frame, this, -1);
}
+int
+afr_read_txn_refresh_done(call_frame_t *frame, xlator_t *this, int err)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int read_subvol = -1;
+ inode_t *inode = NULL;
+ int ret = -1;
+ int spb_subvol = -1;
+
+ local = frame->local;
+ inode = local->inode;
+ priv = this->private;
+
+ if (err) {
+ if (!priv->thin_arbiter_count)
+ goto readfn;
+ if (err != EINVAL)
+ goto readfn;
+ /* We need to query the good bricks and/or thin-arbiter.*/
+ afr_ta_read_txn_synctask(frame, this);
+ return 0;
+ }
+
+ read_subvol = afr_read_subvol_select_by_policy(inode, this, local->readable,
+ NULL);
+ if (read_subvol == -1) {
+ err = EIO;
+ goto readfn;
+ }
+
+ if (local->read_attempted[read_subvol]) {
+ afr_read_txn_next_subvol(frame, this);
+ return 0;
+ }
+
+ local->read_attempted[read_subvol] = 1;
+readfn:
+ if (read_subvol == -1) {
+ ret = afr_split_brain_read_subvol_get(inode, this, frame, &spb_subvol);
+ if ((ret == 0) && spb_subvol >= 0)
+ read_subvol = spb_subvol;
+ }
+
+ if (read_subvol == -1) {
+ AFR_SET_ERROR_AND_CHECK_SPLIT_BRAIN(-1, err);
+ }
+ afr_read_txn_wind(frame, this, read_subvol);
+
+ return 0;
+}
int
-afr_read_txn_continue (call_frame_t *frame, xlator_t *this, int subvol)
+afr_read_txn_continue(call_frame_t *frame, xlator_t *this, int subvol)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- if (!local->refreshed) {
- local->refreshed = _gf_true;
- afr_inode_refresh (frame, this, local->inode,
- afr_read_txn_refresh_done);
- } else {
- afr_read_txn_next_subvol (frame, this);
- }
+ if (!local->refreshed) {
+ local->refreshed = _gf_true;
+ afr_inode_refresh(frame, this, local->inode, NULL,
+ afr_read_txn_refresh_done);
+ } else {
+ afr_read_txn_next_subvol(frame, this);
+ }
- return 0;
+ return 0;
}
-
/* afr_read_txn_wipe:
clean internal variables in @local in order to make
@@ -139,27 +342,26 @@ afr_read_txn_continue (call_frame_t *frame, xlator_t *this, int subvol)
*/
void
-afr_read_txn_wipe (call_frame_t *frame, xlator_t *this)
+afr_read_txn_wipe(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- local->readfn = NULL;
+ local->readfn = NULL;
- if (local->inode)
- inode_unref (local->inode);
+ if (local->inode)
+ inode_unref(local->inode);
- for (i = 0; i < priv->child_count; i++) {
- local->read_attempted[i] = 0;
- local->readable[i] = 0;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ local->read_attempted[i] = 0;
+ local->readable[i] = 0;
+ }
}
-
/*
afr_read_txn:
@@ -188,87 +390,105 @@ afr_read_txn_wipe (call_frame_t *frame, xlator_t *this)
*/
int
-afr_read_txn (call_frame_t *frame, xlator_t *this, inode_t *inode,
- afr_read_txn_wind_t readfn, afr_transaction_type type)
+afr_read_txn(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ afr_read_txn_wind_t readfn, afr_transaction_type type)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- unsigned char *data = NULL;
- unsigned char *metadata = NULL;
- int read_subvol = -1;
- int event_generation = 0;
- int ret = -1;
-
- priv = this->private;
- local = frame->local;
- data = alloca0 (priv->child_count);
- metadata = alloca0 (priv->child_count);
-
- afr_read_txn_wipe (frame, this);
-
- local->readfn = readfn;
- local->inode = inode_ref (inode);
-
- if (priv->quorum_reads &&
- priv->quorum_count && !afr_has_quorum (priv->child_up, this)) {
- local->op_ret = -1;
- local->op_errno = ENOTCONN;
- read_subvol = -1;
- goto read;
- }
-
- local->transaction.type = type;
- if (local->op == GF_FOP_FSTAT || local->op == GF_FOP_STAT) {
- ret = afr_inode_read_subvol_get (inode, this, data, metadata,
- &event_generation);
- AFR_INTERSECT (local->readable, data, metadata,
- priv->child_count);
- } else {
- ret = afr_inode_read_subvol_type_get (inode, this, local->readable,
- &event_generation, type);
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ unsigned char *data = NULL;
+ unsigned char *metadata = NULL;
+ int read_subvol = -1;
+ int event_generation = 0;
+ int ret = -1;
+
+ priv = this->private;
+ local = frame->local;
+ data = alloca0(priv->child_count);
+ metadata = alloca0(priv->child_count);
+
+ afr_read_txn_wipe(frame, this);
+
+ local->readfn = readfn;
+ local->inode = inode_ref(inode);
+ local->is_read_txn = _gf_true;
+ local->transaction.type = type;
+
+ if (priv->quorum_count && !afr_has_quorum(local->child_up, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = afr_quorum_errno(priv);
+ goto read;
+ }
+
+ if (!afr_is_consistent_io_possible(local, priv, &local->op_errno)) {
+ local->op_ret = -1;
+ goto read;
+ }
+
+ if (priv->thin_arbiter_count && !afr_ta_has_quorum(priv, local)) {
+ local->op_ret = -1;
+ local->op_errno = -afr_quorum_errno(priv);
+ goto read;
+ }
+
+ if (priv->thin_arbiter_count &&
+ AFR_COUNT(local->child_up, priv->child_count) != priv->child_count) {
+ if (local->child_up[0]) {
+ local->read_txn_query_child = AFR_CHILD_ZERO;
+ } else if (local->child_up[1]) {
+ local->read_txn_query_child = AFR_CHILD_ONE;
}
- if (ret == -1)
- /* very first transaction on this inode */
- goto refresh;
-
- gf_msg_debug (this->name, 0, "%s: generation now vs cached: %d, "
- "%d", uuid_utoa (inode->gfid), local->event_generation,
- event_generation);
- if (local->event_generation != event_generation)
- /* servers have disconnected / reconnected, and possibly
- rebooted, very likely changing the state of freshness
- of copies */
- goto refresh;
-
- read_subvol = afr_read_subvol_select_by_policy (inode, this,
- local->readable, NULL);
-
- if (read_subvol < 0 || read_subvol > priv->child_count) {
- gf_msg (this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN,
- "Unreadable subvolume %d found with event generation "
- "%d for gfid %s. (Possible split-brain)",
- read_subvol, event_generation, uuid_utoa(inode->gfid));
- goto refresh;
- }
-
- if (!local->child_up[read_subvol]) {
- /* should never happen, just in case */
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_READ_SUBVOL_ERROR, "subvolume %d is the "
- "read subvolume in this generation, but is not up",
- read_subvol);
- goto refresh;
- }
-
- local->read_attempted[read_subvol] = 1;
+ afr_ta_read_txn_synctask(frame, this);
+ return 0;
+ }
+
+ ret = afr_inode_read_subvol_get(inode, this, data, metadata,
+ &event_generation);
+ if (ret == -1)
+ /* very first transaction on this inode */
+ goto refresh;
+ AFR_INTERSECT(local->readable, data, metadata, priv->child_count);
+
+ gf_msg_debug(this->name, 0,
+ "%s: generation now vs cached: %d, "
+ "%d",
+ uuid_utoa(inode->gfid), local->event_generation,
+ event_generation);
+ if (afr_is_inode_refresh_reqd(inode, this, local->event_generation,
+ event_generation))
+ /* servers have disconnected / reconnected, and possibly
+ rebooted, very likely changing the state of freshness
+ of copies */
+ goto refresh;
+
+ read_subvol = afr_read_subvol_select_by_policy(inode, this, local->readable,
+ NULL);
+
+ if (read_subvol < 0 || read_subvol > priv->child_count) {
+ gf_msg_debug(this->name, 0,
+ "Unreadable subvolume %d found "
+ "with event generation %d for gfid %s.",
+ read_subvol, event_generation, uuid_utoa(inode->gfid));
+ goto refresh;
+ }
+
+ if (!local->child_up[read_subvol]) {
+ /* should never happen, just in case */
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_READ_SUBVOL_ERROR,
+ "subvolume %d is the "
+ "read subvolume in this generation, but is not up",
+ read_subvol);
+ goto refresh;
+ }
+
+ local->read_attempted[read_subvol] = 1;
read:
- local->readfn (frame, this, read_subvol);
+ afr_read_txn_wind(frame, this, read_subvol);
- return 0;
+ return 0;
refresh:
- afr_inode_refresh (frame, this, inode, afr_read_txn_refresh_done);
+ afr_inode_refresh(frame, this, inode, NULL, afr_read_txn_refresh_done);
- return 0;
+ return 0;
}
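In summary, `afr_read_txn()` above decides in a fixed order: fail when quorum or consistent I/O is impossible, refresh the inode when the cached event generation is stale or no readable copy is known, and otherwise wind the read to the subvolume chosen by the read policy. A condensed, hypothetical sketch of that ordering (the `txn_state_t` type and `decide()` helper are illustrative only, not AFR structures):

```
#include <stdio.h>

typedef enum { ACT_FAIL, ACT_REFRESH, ACT_READ } txn_action_t;

typedef struct {
    int have_quorum;   /* client-side quorum is satisfied            */
    int cached_gen;    /* event generation cached in the inode       */
    int current_gen;   /* event generation currently known           */
    int policy_subvol; /* read-policy choice, -1 if none is readable */
} txn_state_t;

static txn_action_t
decide(const txn_state_t *s, int *subvol)
{
    if (!s->have_quorum)
        return ACT_FAIL;    /* report the quorum error, no refresh helps  */
    if (s->cached_gen != s->current_gen)
        return ACT_REFRESH; /* bricks went down/up, refresh inode state   */
    if (s->policy_subvol < 0)
        return ACT_REFRESH; /* possible split-brain, refresh and re-check */
    *subvol = s->policy_subvol;
    return ACT_READ;        /* wind the read to *subvol */
}

int
main(void)
{
    txn_state_t s = {1, 4, 4, 1};
    int subvol = -1;
    printf("action=%d subvol=%d\n", decide(&s, &subvol), subvol);
    return 0;
}
```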
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index 3f4e9703546..a580a1584cc 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -8,305 +8,808 @@
cases as published by the Free Software Foundation.
*/
-
#include "afr.h"
#include "afr-self-heal.h"
-#include "byte-order.h"
+#include <glusterfs/byte-order.h>
#include "protocol-common.h"
#include "afr-messages.h"
+#include <glusterfs/events.h>
void
-afr_heal_synctask (xlator_t *this, afr_local_t *local);
+afr_heal_synctask(xlator_t *this, afr_local_t *local);
int
-afr_selfheal_post_op_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, dict_t *xattr, dict_t *xdata)
+afr_lookup_and_heal_gfid(xlator_t *this, inode_t *parent, const char *name,
+ inode_t *inode, struct afr_reply *replies, int source,
+ unsigned char *sources, void *gfid, int *gfid_idx)
{
- afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ call_frame_t *frame = NULL;
+ afr_local_t *local = NULL;
+ unsigned char *wind_on = NULL;
+ ia_type_t ia_type = IA_INVAL;
+ dict_t *xdata = NULL;
+ loc_t loc = {
+ 0,
+ };
+ int ret = 0;
+ int i = 0;
+
+ priv = this->private;
+ wind_on = alloca0(priv->child_count);
+ if (source >= 0 && replies[source].valid && replies[source].op_ret == 0)
+ ia_type = replies[source].poststat.ia_type;
+
+ if (ia_type != IA_INVAL)
+ goto heal;
+
+ /* If ia_type is still invalid, it means either
+ * (a) 'source' was -1, i.e. the parent dir's pending xattrs are in
+ * split-brain, or
+ * (b) the parent dir's pending xattrs are all zeroes (i.e. all bricks
+ * are sources) and the 'source' we selected earlier might be the one
+ * where the file is not actually present.
+ *
+ * In both cases, pick a brick with a successful reply and use its
+ * ia_type.
+ */
+ for (i = 0; i < priv->child_count; i++) {
+ if (source == -1) {
+ /* case (a) above. */
+ if (replies[i].valid && replies[i].op_ret == 0 &&
+ replies[i].poststat.ia_type != IA_INVAL) {
+ ia_type = replies[i].poststat.ia_type;
+ break;
+ }
+ } else {
+ /* case (b) above. */
+ if (i == source)
+ continue;
+ if (sources[i] && replies[i].valid && replies[i].op_ret == 0 &&
+ replies[i].poststat.ia_type != IA_INVAL) {
+ ia_type = replies[i].poststat.ia_type;
+ break;
+ }
+ }
+ }
+
+heal:
+ /* Perform gfid heal on those subvolumes that do not have a gfid
+ * associated with the inode and update their replies.
+ */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret != 0)
+ continue;
+
+ if (gf_uuid_is_null(gfid) &&
+ !gf_uuid_is_null(replies[i].poststat.ia_gfid) &&
+ replies[i].poststat.ia_type == ia_type)
+ gfid = replies[i].poststat.ia_gfid;
- local = frame->local;
+ if (!gf_uuid_is_null(replies[i].poststat.ia_gfid) ||
+ replies[i].poststat.ia_type != ia_type)
+ continue;
- syncbarrier_wake (&local->barrier);
+ wind_on[i] = 1;
+ }
+
+ if (AFR_COUNT(wind_on, priv->child_count) == 0)
+ return 0;
+
+ xdata = dict_new();
+ if (!xdata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = dict_set_gfuuid(xdata, "gfid-req", gfid, true);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ frame = afr_frame_create(this, &ret);
+ if (!frame) {
+ ret = -ret;
+ goto out;
+ }
+
+ local = frame->local;
+ loc.parent = inode_ref(parent);
+ gf_uuid_copy(loc.pargfid, parent->gfid);
+ loc.name = name;
+ loc.inode = inode_ref(inode);
+
+ AFR_ONLIST(wind_on, frame, afr_selfheal_discover_cbk, lookup, &loc, xdata);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!wind_on[i])
+ continue;
+ afr_reply_wipe(&replies[i]);
+ afr_reply_copy(&replies[i], &local->replies[i]);
+ }
+ if (gfid_idx && (*gfid_idx == -1)) {
+ /* Pick a brick where the gfid heal was successful. */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!wind_on[i])
+ continue;
+ if (replies[i].valid && replies[i].op_ret == 0 &&
+ !gf_uuid_is_null(replies[i].poststat.ia_gfid)) {
+ *gfid_idx = i;
+ break;
+ }
+ }
+ }
+out:
+ if (gfid_idx && (*gfid_idx == -1) && (ret == 0) && local) {
+ ret = -afr_final_errno(local, priv);
+ }
+ loc_wipe(&loc);
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ if (xdata)
+ dict_unref(xdata);
+
+ return ret;
+}
+
+int
+afr_gfid_sbrain_source_from_src_brick(xlator_t *this, struct afr_reply *replies,
+ char *src_brick)
+{
+ int i = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ if (strcmp(priv->children[i]->name, src_brick) == 0)
+ return i;
+ }
+ return -1;
+}
+
+int
+afr_selfheal_gfid_mismatch_by_majority(struct afr_reply *replies,
+ int child_count)
+{
+ int j = 0;
+ int i = 0;
+ int votes;
+
+ for (i = 0; i < child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+
+ votes = 1;
+ for (j = i + 1; j < child_count; j++) {
+ if ((!gf_uuid_compare(replies[i].poststat.ia_gfid,
+ replies[j].poststat.ia_gfid)))
+ votes++;
+ if (votes > child_count / 2)
+ return i;
+ }
+ }
+
+ return -1;
+}
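`afr_selfheal_gfid_mismatch_by_majority()` above resolves a gfid split-brain only when strictly more than half of the bricks report the same gfid (and is therefore not used for 2-way replicas). A self-contained sketch of that majority rule over plain strings (`majority_index` is a hypothetical stand-in, not AFR code):

```
#include <stdio.h>
#include <string.h>

/* Return the index whose id is shared by more than half of the entries. */
static int
majority_index(const char *ids[], int count)
{
    for (int i = 0; i < count; i++) {
        int votes = 1; /* the candidate counts itself */
        for (int j = i + 1; j < count; j++)
            if (strcmp(ids[i], ids[j]) == 0)
                votes++;
        if (votes > count / 2)
            return i;
    }
    return -1; /* no strict majority, e.g. a 1-1 tie on two bricks */
}

int
main(void)
{
    const char *ids[] = {"gfid-a", "gfid-b", "gfid-a"};
    printf("majority at index %d\n", majority_index(ids, 3));
    return 0;
}
```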
- return 0;
+int
+afr_gfid_sbrain_source_from_bigger_file(struct afr_reply *replies,
+ int child_count)
+{
+ int i = 0;
+ int src = -1;
+ uint64_t size = 0;
+
+ for (i = 0; i < child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret == -1)
+ continue;
+ if (size < replies[i].poststat.ia_size) {
+ src = i;
+ size = replies[i].poststat.ia_size;
+ } else if (replies[i].poststat.ia_size == size) {
+ src = -1;
+ }
+ }
+ return src;
}
+int
+afr_gfid_sbrain_source_from_latest_mtime(struct afr_reply *replies,
+ int child_count)
+{
+ int i = 0;
+ int src = -1;
+ uint32_t mtime = 0;
+ uint32_t mtime_nsec = 0;
+
+ for (i = 0; i < child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret != 0)
+ continue;
+ if ((mtime < replies[i].poststat.ia_mtime) ||
+ ((mtime == replies[i].poststat.ia_mtime) &&
+ (mtime_nsec < replies[i].poststat.ia_mtime_nsec))) {
+ src = i;
+ mtime = replies[i].poststat.ia_mtime;
+ mtime_nsec = replies[i].poststat.ia_mtime_nsec;
+ } else if ((mtime == replies[i].poststat.ia_mtime) &&
+ (mtime_nsec == replies[i].poststat.ia_mtime_nsec)) {
+ src = -1;
+ }
+ }
+ return src;
+}
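Both of the source pickers above refuse to decide on an exact tie: `afr_gfid_sbrain_source_from_bigger_file()` and `afr_gfid_sbrain_source_from_latest_mtime()` return -1 when two bricks share the largest size or the newest mtime. A small sketch of the mtime variant, with a simplified `(sec, nsec)` pair standing in for the `struct iatt` fields:

```
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t sec;
    uint32_t nsec;
} ts_t;

/* Latest mtime wins; an exact tie on the newest value disqualifies both. */
static int
latest_index(const ts_t *t, int count)
{
    int src = -1;
    ts_t best = {0, 0};

    for (int i = 0; i < count; i++) {
        if (t[i].sec > best.sec ||
            (t[i].sec == best.sec && t[i].nsec > best.nsec)) {
            best = t[i];
            src = i;
        } else if (t[i].sec == best.sec && t[i].nsec == best.nsec) {
            src = -1; /* undecidable: two bricks share the newest mtime */
        }
    }
    return src;
}

int
main(void)
{
    ts_t t[2] = {{100, 5}, {100, 9}};
    printf("source brick: %d\n", latest_index(t, 2));
    return 0;
}
```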
int
-afr_selfheal_post_op (call_frame_t *frame, xlator_t *this, inode_t *inode,
- int subvol, dict_t *xattr)
+afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+ inode_t *inode, uuid_t pargfid, const char *bname,
+ int src_idx, int child_idx,
+ unsigned char *locked_on, int *src, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- loc_t loc = {0, };
+ afr_private_t *priv = NULL;
+ char g1[64] = {
+ 0,
+ };
+ char g2[64] = {
+ 0,
+ };
+ int up_count = 0;
+ int heal_op = -1;
+ int ret = -1;
+ char *src_brick = NULL;
+
+ *src = -1;
+ priv = this->private;
+ up_count = AFR_COUNT(locked_on, priv->child_count);
+ if (up_count != priv->child_count) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "All the bricks should be up to resolve the gfid split "
+ "barin");
+ if (xdata) {
+ ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ SALL_BRICKS_UP_TO_RESOLVE);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_DICT_SET_FAILED,
+ "Error setting"
+ " gfid-heal-msg dict");
+ }
+ goto out;
+ }
- priv = this->private;
- local = frame->local;
+ if (xdata) {
+ ret = dict_get_int32_sizen(xdata, "heal-op", &heal_op);
+ if (ret)
+ goto fav_child;
+ } else {
+ goto fav_child;
+ }
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
+ switch (heal_op) {
+ case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:
+ *src = afr_gfid_sbrain_source_from_bigger_file(replies,
+ priv->child_count);
+ if (*src == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ SNO_BIGGER_FILE);
+ if (xdata) {
+ ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ SNO_BIGGER_FILE);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ AFR_MSG_DICT_SET_FAILED,
+ "Error"
+ " setting gfid-heal-msg dict");
+ }
+ }
+ break;
- STACK_WIND (frame, afr_selfheal_post_op_cbk, priv->children[subvol],
- priv->children[subvol]->fops->xattrop, &loc,
- GF_XATTROP_ADD_ARRAY, xattr, NULL);
+ case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:
+ *src = afr_gfid_sbrain_source_from_latest_mtime(replies,
+ priv->child_count);
+ if (*src == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ SNO_DIFF_IN_MTIME);
+ if (xdata) {
+ ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ SNO_DIFF_IN_MTIME);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ AFR_MSG_DICT_SET_FAILED,
+ "Error"
+ "setting gfid-heal-msg dict");
+ }
+ }
+ break;
- syncbarrier_wait (&local->barrier, 1);
+ case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
+ ret = dict_get_str_sizen(xdata, "child-name", &src_brick);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "Error getting the source "
+ "brick");
+ break;
+ }
+ *src = afr_gfid_sbrain_source_from_src_brick(this, replies,
+ src_brick);
+ if (*src == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ SERROR_GETTING_SRC_BRICK);
+ if (xdata) {
+ ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ SERROR_GETTING_SRC_BRICK);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ AFR_MSG_DICT_SET_FAILED,
+ "Error"
+ " setting gfid-heal-msg dict");
+ }
+ }
+ break;
- loc_wipe (&loc);
+ default:
+ break;
+ }
+ goto out;
+
+fav_child:
+ switch (priv->fav_child_policy) {
+ case AFR_FAV_CHILD_BY_SIZE:
+ *src = afr_sh_fav_by_size(this, replies, inode);
+ break;
+ case AFR_FAV_CHILD_BY_MTIME:
+ *src = afr_sh_fav_by_mtime(this, replies, inode);
+ break;
+ case AFR_FAV_CHILD_BY_CTIME:
+ *src = afr_sh_fav_by_ctime(this, replies, inode);
+ break;
+ case AFR_FAV_CHILD_BY_MAJORITY:
+ if (priv->child_count != 2)
+ *src = afr_selfheal_gfid_mismatch_by_majority(
+ replies, priv->child_count);
+ else
+ *src = -1;
+
+ if (*src == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "No majority to resolve "
+ "gfid split brain");
+ }
+ break;
+ default:
+ break;
+ }
- return 0;
+out:
+ if (*src == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "Gfid mismatch detected for <gfid:%s>/%s>, %s on %s and"
+ " %s on %s.",
+ uuid_utoa(pargfid), bname,
+ uuid_utoa_r(replies[child_idx].poststat.ia_gfid, g1),
+ priv->children[child_idx]->name,
+ uuid_utoa_r(replies[src_idx].poststat.ia_gfid, g2),
+ priv->children[src_idx]->name);
+ gf_event(EVENT_AFR_SPLIT_BRAIN,
+ "client-pid=%d;"
+ "subvol=%s;type=gfid;file="
+ "<gfid:%s>/%s>;count=2;child-%d=%s;gfid-%d=%s;"
+ "child-%d=%s;gfid-%d=%s",
+ this->ctx->cmd_args.client_pid, this->name, uuid_utoa(pargfid),
+ bname, child_idx, priv->children[child_idx]->name, child_idx,
+ uuid_utoa_r(replies[child_idx].poststat.ia_gfid, g1), src_idx,
+ priv->children[src_idx]->name, src_idx,
+ uuid_utoa_r(replies[src_idx].poststat.ia_gfid, g2));
+ return -1;
+ }
+ return 0;
}
int
-afr_check_stale_error (struct afr_reply *replies, afr_private_t *priv)
+afr_selfheal_post_op_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, dict_t *xattr, dict_t *xdata)
{
- int i = 0;
- int op_errno = 0;
- int tmp_errno = 0;
- int stale_count = 0;
+ afr_local_t *local = NULL;
- for (i = 0; i < priv->child_count; i++) {
- tmp_errno = replies[i].op_errno;
- if (tmp_errno == ENOENT || tmp_errno == ESTALE) {
- op_errno = afr_higher_errno (op_errno, tmp_errno);
- stale_count++;
- }
- }
- if (stale_count != priv->child_count)
- return -ENOTCONN;
- else
- return -op_errno;
+ local = frame->local;
+
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
+ syncbarrier_wake(&local->barrier);
+
+ return 0;
}
+int
+afr_selfheal_post_op(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ int subvol, dict_t *xattr, dict_t *xdata)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ loc_t loc = {
+ 0,
+ };
+ int ret = 0;
-dict_t *
-afr_selfheal_output_xattr (xlator_t *this, afr_transaction_type type,
- int *output_dirty, int **output_matrix, int subvol)
-{
- dict_t *xattr = NULL;
- afr_private_t *priv = NULL;
- int j = 0;
- int idx = 0;
- int ret = 0;
- int *raw = 0;
-
- priv = this->private;
- idx = afr_index_for_transaction_type (type);
-
- xattr = dict_new ();
- if (!xattr)
- return NULL;
-
- /* clear dirty */
- raw = GF_CALLOC (sizeof(int), AFR_NUM_CHANGE_LOGS, gf_afr_mt_int32_t);
- if (!raw)
- goto err;
-
- raw[idx] = hton32 (output_dirty[subvol]);
- ret = dict_set_bin (xattr, AFR_DIRTY, raw,
- sizeof(int) * AFR_NUM_CHANGE_LOGS);
- if (ret) {
- GF_FREE (raw);
- goto err;
- }
+ priv = this->private;
+ local = frame->local;
- /* clear/set pending */
- for (j = 0; j < priv->child_count; j++) {
- raw = GF_CALLOC (sizeof(int), AFR_NUM_CHANGE_LOGS,
- gf_afr_mt_int32_t);
- if (!raw)
- goto err;
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- raw[idx] = hton32 (output_matrix[subvol][j]);
+ local->op_ret = 0;
- ret = dict_set_bin (xattr, priv->pending_key[j],
- raw, sizeof(int) * AFR_NUM_CHANGE_LOGS);
- if (ret) {
- GF_FREE (raw);
- goto err;
- }
- }
+ STACK_WIND(frame, afr_selfheal_post_op_cbk, priv->children[subvol],
+ priv->children[subvol]->fops->xattrop, &loc,
+ GF_XATTROP_ADD_ARRAY, xattr, xdata);
- return xattr;
-err:
- if (xattr)
- dict_unref (xattr);
- return NULL;
+ syncbarrier_wait(&local->barrier, 1);
+ if (local->op_ret < 0)
+ ret = -local->op_errno;
+
+ loc_wipe(&loc);
+ local->op_ret = 0;
+
+ return ret;
}
+int
+afr_check_stale_error(struct afr_reply *replies, afr_private_t *priv)
+{
+ int i = 0;
+ int op_errno = 0;
+ int tmp_errno = 0;
+ int stale_count = 0;
+
+ for (i = 0; i < priv->child_count; i++) {
+ tmp_errno = replies[i].op_errno;
+ if (tmp_errno == ENOENT || tmp_errno == ESTALE) {
+ op_errno = afr_higher_errno(op_errno, tmp_errno);
+ stale_count++;
+ }
+ }
+ if (stale_count != priv->child_count)
+ return -ENOTCONN;
+ else
+ return -op_errno;
+}
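`afr_check_stale_error()` above propagates a negative ENOENT/ESTALE only when every brick reported one of those two errors; any other mix is treated as inconclusive and collapsed to -ENOTCONN. A minimal sketch of that rule (taking the numerically larger errno is a crude stand-in for `afr_higher_errno()`):

```
#include <errno.h>
#include <stdio.h>

static int
stale_error(const int op_errno[], int count)
{
    int stale = 0;
    int err = 0;

    for (int i = 0; i < count; i++) {
        if (op_errno[i] == ENOENT || op_errno[i] == ESTALE) {
            if (op_errno[i] > err) /* crude stand-in for afr_higher_errno() */
                err = op_errno[i];
            stale++;
        }
    }
    return (stale == count) ? -err : -ENOTCONN;
}

int
main(void)
{
    int errs[3] = {ENOENT, ESTALE, ENOENT};
    printf("result: %d\n", stale_error(errs, 3)); /* all stale: returns -ESTALE */
    return 0;
}
```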
int
-afr_selfheal_undo_pending (call_frame_t *frame, xlator_t *this, inode_t *inode,
- unsigned char *sources, unsigned char *sinks,
- unsigned char *healed_sinks, afr_transaction_type type,
- struct afr_reply *replies, unsigned char *locked_on)
+afr_sh_generic_fop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *pre,
+ struct iatt *post, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- int i = 0;
- int j = 0;
- unsigned char *pending = NULL;
- int *input_dirty = NULL;
- int **input_matrix = NULL;
- int *output_dirty = NULL;
- int **output_matrix = NULL;
- dict_t *xattr = NULL;
+ int i = (long)cookie;
+ afr_local_t *local = NULL;
- priv = this->private;
+ local = frame->local;
- pending = alloca0 (priv->child_count);
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+ if (pre)
+ local->replies[i].prestat = *pre;
+ if (post)
+ local->replies[i].poststat = *post;
+ if (xdata)
+ local->replies[i].xdata = dict_ref(xdata);
- input_dirty = alloca0 (priv->child_count * sizeof (int));
- input_matrix = ALLOC_MATRIX (priv->child_count, int);
- output_dirty = alloca0 (priv->child_count * sizeof (int));
- output_matrix = ALLOC_MATRIX (priv->child_count, int);
+ syncbarrier_wake(&local->barrier);
- afr_selfheal_extract_xattr (this, replies, type, input_dirty,
- input_matrix);
+ return 0;
+}
- for (i = 0; i < priv->child_count; i++)
- if (sinks[i] && !healed_sinks[i])
- pending[i] = 1;
+int
+afr_selfheal_restore_time(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ int source, unsigned char *healed_sinks,
+ struct afr_reply *replies)
+{
+ loc_t loc = {
+ 0,
+ };
- for (i = 0; i < priv->child_count; i++) {
- for (j = 0; j < priv->child_count; j++) {
- if (pending[j])
- output_matrix[i][j] = 1;
- else
- output_matrix[i][j] = -input_matrix[i][j];
- }
- }
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- for (i = 0; i < priv->child_count; i++) {
- if (!pending[i])
- output_dirty[i] = -input_dirty[i];
- }
+ AFR_ONLIST(healed_sinks, frame, afr_sh_generic_fop_cbk, setattr, &loc,
+ &replies[source].poststat,
+ (GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME | GF_SET_ATTR_CTIME),
+ NULL);
- for (i = 0; i < priv->child_count; i++) {
- if (!locked_on[i])
- /* perform post-op only on subvols we had locked
- and inspected on.
- */
- continue;
+ loc_wipe(&loc);
- xattr = afr_selfheal_output_xattr (this, type, output_dirty,
- output_matrix, i);
- if (!xattr) {
- continue;
- }
+ return 0;
+}
- afr_selfheal_post_op (frame, this, inode, i, xattr);
+dict_t *
+afr_selfheal_output_xattr(xlator_t *this, gf_boolean_t is_full_crawl,
+ afr_transaction_type type, int *output_dirty,
+ int **output_matrix, int subvol,
+ int **full_heal_mtx_out)
+{
+ int j = 0;
+ int idx = 0;
+ int d_idx = 0;
+ int ret = 0;
+ int *raw = 0;
+ dict_t *xattr = NULL;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ idx = afr_index_for_transaction_type(type);
+ d_idx = afr_index_for_transaction_type(AFR_DATA_TRANSACTION);
+
+ xattr = dict_new();
+ if (!xattr)
+ return NULL;
- dict_unref (xattr);
- }
+ /* clear dirty */
+ raw = GF_CALLOC(sizeof(int), AFR_NUM_CHANGE_LOGS, gf_afr_mt_int32_t);
+ if (!raw)
+ goto err;
+
+ raw[idx] = hton32(output_dirty[subvol]);
+ ret = dict_set_bin(xattr, AFR_DIRTY, raw,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ if (ret) {
+ GF_FREE(raw);
+ goto err;
+ }
+
+ /* clear/set pending */
+ for (j = 0; j < priv->child_count; j++) {
+ raw = GF_CALLOC(sizeof(int), AFR_NUM_CHANGE_LOGS, gf_afr_mt_int32_t);
+ if (!raw)
+ goto err;
+
+ raw[idx] = hton32(output_matrix[subvol][j]);
+ if (is_full_crawl)
+ raw[d_idx] = hton32(full_heal_mtx_out[subvol][j]);
+
+ ret = dict_set_bin(xattr, priv->pending_key[j], raw,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ if (ret) {
+ GF_FREE(raw);
+ goto err;
+ }
+ }
- return 0;
+ return xattr;
+err:
+ if (xattr)
+ dict_unref(xattr);
+ return NULL;
}
+int
+afr_selfheal_undo_pending(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ unsigned char *sources, unsigned char *sinks,
+ unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ afr_transaction_type type, struct afr_reply *replies,
+ unsigned char *locked_on)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+ int j = 0;
+ unsigned char *pending = NULL;
+ int *input_dirty = NULL;
+ int **input_matrix = NULL;
+ int **full_heal_mtx_in = NULL;
+ int **full_heal_mtx_out = NULL;
+ int *output_dirty = NULL;
+ int **output_matrix = NULL;
+ dict_t *xattr = NULL;
+ dict_t *xdata = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ pending = alloca0(priv->child_count);
+
+ input_dirty = alloca0(priv->child_count * sizeof(int));
+ input_matrix = ALLOC_MATRIX(priv->child_count, int);
+ full_heal_mtx_in = ALLOC_MATRIX(priv->child_count, int);
+ full_heal_mtx_out = ALLOC_MATRIX(priv->child_count, int);
+ output_dirty = alloca0(priv->child_count * sizeof(int));
+ output_matrix = ALLOC_MATRIX(priv->child_count, int);
+
+ xdata = dict_new();
+ if (!xdata)
+ return -1;
+
+ afr_selfheal_extract_xattr(this, replies, type, input_dirty, input_matrix);
+
+ if (local->need_full_crawl)
+ afr_selfheal_extract_xattr(this, replies, AFR_DATA_TRANSACTION, NULL,
+ full_heal_mtx_in);
+
+ for (i = 0; i < priv->child_count; i++)
+ if (sinks[i] && !healed_sinks[i])
+ pending[i] = 1;
+
+ for (i = 0; i < priv->child_count; i++) {
+ for (j = 0; j < priv->child_count; j++) {
+ if (pending[j]) {
+ output_matrix[i][j] = 1;
+ if (type == AFR_ENTRY_TRANSACTION)
+ full_heal_mtx_out[i][j] = 1;
+ } else if (locked_on[j]) {
+ output_matrix[i][j] = -input_matrix[i][j];
+ if (type == AFR_ENTRY_TRANSACTION)
+ full_heal_mtx_out[i][j] = -full_heal_mtx_in[i][j];
+ }
+ }
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!pending[i])
+ output_dirty[i] = -input_dirty[i];
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!locked_on[i])
+ /* perform post-op only on subvols we had locked
+ and inspected on.
+ */
+ continue;
+ if (undid_pending[i])
+ /* We already unset the pending xattrs in
+ * _afr_fav_child_reset_sink_xattrs(). */
+ continue;
+
+ xattr = afr_selfheal_output_xattr(this, local->need_full_crawl, type,
+ output_dirty, output_matrix, i,
+ full_heal_mtx_out);
+ if (!xattr) {
+ continue;
+ }
+
+ if ((type == AFR_ENTRY_TRANSACTION) && (priv->esh_granular)) {
+ if (xdata && dict_set_int8(xdata, GF_XATTROP_PURGE_INDEX, 1))
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_DICT_SET_FAILED,
+ "Failed to set"
+ " dict value for %s",
+ GF_XATTROP_PURGE_INDEX);
+ }
+
+ afr_selfheal_post_op(frame, this, inode, i, xattr, xdata);
+ dict_unref(xattr);
+ }
+
+ if (xdata)
+ dict_unref(xdata);
+
+ return 0;
+}
void
-afr_replies_copy (struct afr_reply *dst, struct afr_reply *src, int count)
-{
- int i = 0;
- dict_t *xdata = NULL;
-
- if (dst == src)
- return;
-
- for (i = 0; i < count; i++) {
- dst[i].valid = src[i].valid;
- dst[i].op_ret = src[i].op_ret;
- dst[i].op_errno = src[i].op_errno;
- dst[i].prestat = src[i].prestat;
- dst[i].poststat = src[i].poststat;
- dst[i].preparent = src[i].preparent;
- dst[i].postparent = src[i].postparent;
- dst[i].preparent2 = src[i].preparent2;
- dst[i].postparent2 = src[i].postparent2;
- if (src[i].xdata)
- xdata = dict_ref (src[i].xdata);
- else
- xdata = NULL;
- if (dst[i].xdata)
- dict_unref (dst[i].xdata);
- dst[i].xdata = xdata;
- memcpy (dst[i].checksum, src[i].checksum,
- MD5_DIGEST_LENGTH);
- }
+afr_reply_copy(struct afr_reply *dst, struct afr_reply *src)
+{
+ dict_t *xdata = NULL;
+
+ dst->valid = src->valid;
+ dst->op_ret = src->op_ret;
+ dst->op_errno = src->op_errno;
+ dst->prestat = src->prestat;
+ dst->poststat = src->poststat;
+ dst->preparent = src->preparent;
+ dst->postparent = src->postparent;
+ dst->preparent2 = src->preparent2;
+ dst->postparent2 = src->postparent2;
+ if (src->xdata)
+ xdata = dict_ref(src->xdata);
+ else
+ xdata = NULL;
+ if (dst->xdata)
+ dict_unref(dst->xdata);
+ dst->xdata = xdata;
+ if (xdata && dict_get_str_boolean(xdata, "fips-mode-rchecksum",
+ _gf_false) == _gf_true) {
+ memcpy(dst->checksum, src->checksum, SHA256_DIGEST_LENGTH);
+ } else {
+ memcpy(dst->checksum, src->checksum, MD5_DIGEST_LENGTH);
+ }
+ dst->fips_mode_rchecksum = src->fips_mode_rchecksum;
}
+void
+afr_replies_copy(struct afr_reply *dst, struct afr_reply *src, int count)
+{
+ int i = 0;
+
+ if (dst == src)
+ return;
+
+ for (i = 0; i < count; i++) {
+ afr_reply_copy(&dst[i], &src[i]);
+ }
+}
int
-afr_selfheal_fill_dirty (xlator_t *this, int *dirty, int subvol,
- int idx, dict_t *xdata)
+afr_selfheal_fill_dirty(xlator_t *this, int *dirty, int subvol, int idx,
+ dict_t *xdata)
{
- void *pending_raw = NULL;
- int pending[3] = {0, };
+ void *pending_raw = NULL;
+ int pending[3] = {
+ 0,
+ };
- if (dict_get_ptr (xdata, AFR_DIRTY, &pending_raw))
- return -1;
+ if (!dirty)
+ return 0;
- if (!pending_raw)
- return -1;
+ if (dict_get_ptr(xdata, AFR_DIRTY, &pending_raw))
+ return -1;
- memcpy (pending, pending_raw, sizeof(pending));
+ if (!pending_raw)
+ return -1;
- dirty[subvol] = ntoh32 (pending[idx]);
+ memcpy(pending, pending_raw, sizeof(pending));
- return 0;
-}
+ dirty[subvol] = ntoh32(pending[idx]);
+ return 0;
+}
int
-afr_selfheal_fill_matrix (xlator_t *this, int **matrix, int subvol,
- int idx, dict_t *xdata)
+afr_selfheal_fill_matrix(xlator_t *this, int **matrix, int subvol, int idx,
+ dict_t *xdata)
{
- int i = 0;
- void *pending_raw = NULL;
- int pending[3] = {0, };
- afr_private_t *priv = NULL;
+ int i = 0;
+ void *pending_raw = NULL;
+ int pending[3] = {
+ 0,
+ };
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
- priv = this->private;
+ if (!matrix)
+ return 0;
- for (i = 0; i < priv->child_count; i++) {
- if (dict_get_ptr (xdata, priv->pending_key[i], &pending_raw))
- continue;
+ for (i = 0; i < priv->child_count; i++) {
+ if (dict_get_ptr(xdata, priv->pending_key[i], &pending_raw))
+ continue;
- if (!pending_raw)
- continue;
+ if (!pending_raw)
+ continue;
- memcpy (pending, pending_raw, sizeof(pending));
+ memcpy(pending, pending_raw, sizeof(pending));
- matrix[subvol][i] = ntoh32 (pending[idx]);
- }
+ matrix[subvol][i] = ntoh32(pending[idx]);
+ }
- return 0;
+ return 0;
}
-
int
-afr_selfheal_extract_xattr (xlator_t *this, struct afr_reply *replies,
- afr_transaction_type type, int *dirty, int **matrix)
+afr_selfheal_extract_xattr(xlator_t *this, struct afr_reply *replies,
+ afr_transaction_type type, int *dirty, int **matrix)
{
- afr_private_t *priv = NULL;
- int i = 0;
- dict_t *xdata = NULL;
- int idx = -1;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ dict_t *xdata = NULL;
+ int idx = -1;
+
+ idx = afr_index_for_transaction_type(type);
- idx = afr_index_for_transaction_type (type);
+ priv = this->private;
- priv = this->private;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret != 0)
+ continue;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].xdata)
- continue;
+ if (!replies[i].xdata)
+ continue;
- xdata = replies[i].xdata;
+ xdata = replies[i].xdata;
- afr_selfheal_fill_dirty (this, dirty, i, idx, xdata);
- afr_selfheal_fill_matrix (this, matrix, i, idx, xdata);
- }
+ afr_selfheal_fill_dirty(this, dirty, i, idx, xdata);
+ afr_selfheal_fill_matrix(this, matrix, i, idx, xdata);
+ }
- return 0;
+ return 0;
}
/*
@@ -316,109 +819,566 @@ afr_selfheal_extract_xattr (xlator_t *this, struct afr_reply *replies,
* This can happen if data was directly modified in the backend or for snapshots
*/
void
-afr_mark_largest_file_as_source (xlator_t *this, unsigned char *sources,
- struct afr_reply *replies)
+afr_mark_largest_file_as_source(xlator_t *this, unsigned char *sources,
+ struct afr_reply *replies)
{
- int i = 0;
- afr_private_t *priv = NULL;
- uint64_t size = 0;
-
- /* Find source with biggest file size */
- priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- if (!replies[i].valid || replies[i].op_ret != 0) {
- sources[i] = 0;
- continue;
- }
- if (size <= replies[i].poststat.ia_size) {
- size = replies[i].poststat.ia_size;
- }
+ int i = 0;
+ afr_private_t *priv = NULL;
+ uint64_t size = 0;
+
+ /* Find source with biggest file size */
+ priv = this->private;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ if (!replies[i].valid || replies[i].op_ret != 0) {
+ sources[i] = 0;
+ continue;
+ }
+ if (size <= replies[i].poststat.ia_size) {
+ size = replies[i].poststat.ia_size;
}
+ }
+
+ /* Mark sources with less size as not source */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ if (size > replies[i].poststat.ia_size)
+ sources[i] = 0;
+ }
+}
- /* Mark sources with less size as not source */
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- if (size > replies[i].poststat.ia_size)
- sources[i] = 0;
+void
+afr_mark_latest_mtime_file_as_source(xlator_t *this, unsigned char *sources,
+ struct afr_reply *replies)
+{
+ int i = 0;
+ afr_private_t *priv = NULL;
+ uint32_t mtime = 0;
+ uint32_t mtime_nsec = 0;
+
+ priv = this->private;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ if (!replies[i].valid || replies[i].op_ret != 0) {
+ sources[i] = 0;
+ continue;
}
+ if ((mtime < replies[i].poststat.ia_mtime) ||
+ ((mtime == replies[i].poststat.ia_mtime) &&
+ (mtime_nsec < replies[i].poststat.ia_mtime_nsec))) {
+ mtime = replies[i].poststat.ia_mtime;
+ mtime_nsec = replies[i].poststat.ia_mtime_nsec;
+ }
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ if ((mtime > replies[i].poststat.ia_mtime) ||
+ ((mtime == replies[i].poststat.ia_mtime) &&
+ (mtime_nsec > replies[i].poststat.ia_mtime_nsec))) {
+ sources[i] = 0;
+ }
+ }
}
void
-afr_mark_latest_mtime_file_as_source (xlator_t *this, unsigned char *sources,
- struct afr_reply *replies)
+afr_mark_active_sinks(xlator_t *this, unsigned char *sources,
+ unsigned char *locked_on, unsigned char *sinks)
{
- int i = 0;
- afr_private_t *priv = NULL;
- uint32_t mtime = 0;
- uint32_t mtime_nsec = 0;
+ int i = 0;
+ afr_private_t *priv = NULL;
- priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- if (!replies[i].valid || replies[i].op_ret != 0) {
- sources[i] = 0;
- continue;
- }
- if ((mtime < replies[i].poststat.ia_mtime) ||
- ((mtime == replies[i].poststat.ia_mtime) &&
- (mtime_nsec < replies[i].poststat.ia_mtime_nsec))) {
- mtime = replies[i].poststat.ia_mtime;
- mtime_nsec = replies[i].poststat.ia_mtime_nsec;
- }
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i] && locked_on[i])
+ sinks[i] = 1;
+ else
+ sinks[i] = 0;
+ }
+}
+
+gf_boolean_t
+afr_dict_contains_heal_op(call_frame_t *frame)
+{
+ afr_local_t *local = NULL;
+ dict_t *xdata_req = NULL;
+ int ret = 0;
+ int heal_op = -1;
+
+ local = frame->local;
+ xdata_req = local->xdata_req;
+ ret = dict_get_int32_sizen(xdata_req, "heal-op", &heal_op);
+ if (ret)
+ return _gf_false;
+ if (local->xdata_rsp == NULL) {
+ local->xdata_rsp = dict_new();
+ if (!local->xdata_rsp)
+ return _gf_true;
+ }
+ ret = dict_set_sizen_str_sizen(local->xdata_rsp, "sh-fail-msg",
+ SFILE_NOT_IN_SPLIT_BRAIN);
+
+ return _gf_true;
+}
+
+gf_boolean_t
+afr_can_decide_split_brain_source_sinks(struct afr_reply *replies,
+ int child_count)
+{
+ int i = 0;
+
+ for (i = 0; i < child_count; i++)
+ if (replies[i].valid != 1 || replies[i].op_ret != 0)
+ return _gf_false;
+
+ return _gf_true;
+}
+
+int
+afr_mark_split_brain_source_sinks_by_heal_op(
+ call_frame_t *frame, xlator_t *this, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks, unsigned char *locked_on,
+ struct afr_reply *replies, afr_transaction_type type, int heal_op)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xdata_req = NULL;
+ dict_t *xdata_rsp = NULL;
+ int ret = 0;
+ int i = 0;
+ char *name = NULL;
+ int source = -1;
+
+ local = frame->local;
+ priv = this->private;
+ xdata_req = local->xdata_req;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (locked_on[i])
+ if (sources[i] || !sinks[i] || !healed_sinks[i]) {
+ ret = -1;
+ goto out;
+ }
+ }
+ if (local->xdata_rsp == NULL) {
+ local->xdata_rsp = dict_new();
+ if (!local->xdata_rsp) {
+ ret = -1;
+ goto out;
}
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- if ((mtime > replies[i].poststat.ia_mtime) ||
- ((mtime == replies[i].poststat.ia_mtime) &&
- (mtime_nsec > replies[i].poststat.ia_mtime_nsec))) {
- sources[i] = 0;
+ }
+ xdata_rsp = local->xdata_rsp;
+
+ if (!afr_can_decide_split_brain_source_sinks(replies, priv->child_count)) {
+ ret = dict_set_sizen_str_sizen(xdata_rsp, "sh-fail-msg",
+ SBRAIN_HEAL_NO_GO_MSG);
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++)
+ if (locked_on[i])
+ sources[i] = 1;
+ switch (heal_op) {
+ case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:
+ if (type == AFR_METADATA_TRANSACTION) {
+ ret = dict_set_sizen_str_sizen(xdata_rsp, "sh-fail-msg",
+ SUSE_SOURCE_BRICK_TO_HEAL);
+ if (!ret)
+ ret = -1;
+ goto out;
+ }
+ afr_mark_largest_file_as_source(this, sources, replies);
+ if (AFR_COUNT(sources, priv->child_count) != 1) {
+ ret = dict_set_sizen_str_sizen(xdata_rsp, "sh-fail-msg",
+ SNO_BIGGER_FILE);
+ if (!ret)
+ ret = -1;
+ goto out;
+ }
+ break;
+ case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:
+ if (type == AFR_METADATA_TRANSACTION) {
+ ret = dict_set_sizen_str_sizen(xdata_rsp, "sh-fail-msg",
+ SUSE_SOURCE_BRICK_TO_HEAL);
+ if (!ret)
+ ret = -1;
+ goto out;
+ }
+ afr_mark_latest_mtime_file_as_source(this, sources, replies);
+ if (AFR_COUNT(sources, priv->child_count) != 1) {
+ ret = dict_set_sizen_str_sizen(xdata_rsp, "sh-fail-msg",
+ SNO_DIFF_IN_MTIME);
+ if (!ret)
+ ret = -1;
+ goto out;
+ }
+ break;
+ case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
+ ret = dict_get_str_sizen(xdata_req, "child-name", &name);
+ if (ret)
+ goto out;
+ source = afr_get_child_index_from_name(this, name);
+ if (source < 0) {
+ ret = dict_set_sizen_str_sizen(xdata_rsp, "sh-fail-msg",
+ SINVALID_BRICK_NAME);
+ if (!ret)
+ ret = -1;
+ goto out;
+ }
+ if (locked_on[source] != 1) {
+ ret = dict_set_sizen_str_sizen(xdata_rsp, "sh-fail-msg",
+ SBRICK_IS_NOT_UP);
+ if (!ret)
+ ret = -1;
+ goto out;
+ }
+ memset(sources, 0, sizeof(*sources) * priv->child_count);
+ sources[source] = 1;
+ break;
+ default:
+ ret = -1;
+ goto out;
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i]) {
+ source = i;
+ break;
+ }
+ }
+ sinks[source] = 0;
+ healed_sinks[source] = 0;
+ ret = source;
+out:
+ if (ret < 0)
+ memset(sources, 0, sizeof(*sources) * priv->child_count);
+ return ret;
+}
+
+int
+afr_sh_fav_by_majority(xlator_t *this, struct afr_reply *replies,
+ inode_t *inode)
+{
+ afr_private_t *priv;
+ int vote_count = -1;
+ int fav_child = -1;
+ int i = 0;
+ int k = 0;
+
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid == 1) {
+ gf_msg_debug(this->name, 0,
+ "Child:%s mtime_sec = %" PRId64 ", size = %" PRIu64
+ " for gfid %s",
+ priv->children[i]->name, replies[i].poststat.ia_mtime,
+ replies[i].poststat.ia_size, uuid_utoa(inode->gfid));
+ vote_count = 0;
+ for (k = 0; k < priv->child_count; k++) {
+ if ((replies[k].poststat.ia_mtime ==
+ replies[i].poststat.ia_mtime) &&
+ (replies[k].poststat.ia_size ==
+ replies[i].poststat.ia_size)) {
+ vote_count++;
}
+ }
+ if (vote_count > priv->child_count / 2) {
+ fav_child = i;
+ break;
+ }
}
+ }
+ return fav_child;
}
-void
-afr_mark_active_sinks (xlator_t *this, unsigned char *sources,
- unsigned char *locked_on, unsigned char *sinks)
+/*
+ * afr_sh_fav_by_mtime: Choose favorite child by mtime.
+ */
+int
+afr_sh_fav_by_mtime(xlator_t *this, struct afr_reply *replies, inode_t *inode)
{
- int i = 0;
- afr_private_t *priv = NULL;
+ afr_private_t *priv;
+ int fav_child = -1;
+ int i = 0;
+ uint32_t cmp_mtime = 0;
+ uint32_t cmp_mtime_nsec = 0;
+
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid == 1) {
+ gf_msg_debug(this->name, 0,
+ "Child:%s mtime = %" PRId64
+ ", mtime_nsec = %d for "
+ "gfid %s",
+ priv->children[i]->name, replies[i].poststat.ia_mtime,
+ replies[i].poststat.ia_mtime_nsec,
+ uuid_utoa(inode->gfid));
+ if (replies[i].poststat.ia_mtime > cmp_mtime) {
+ cmp_mtime = replies[i].poststat.ia_mtime;
+ cmp_mtime_nsec = replies[i].poststat.ia_mtime_nsec;
+ fav_child = i;
+ } else if ((replies[i].poststat.ia_mtime == cmp_mtime) &&
+ (replies[i].poststat.ia_mtime_nsec > cmp_mtime_nsec)) {
+ cmp_mtime = replies[i].poststat.ia_mtime;
+ cmp_mtime_nsec = replies[i].poststat.ia_mtime_nsec;
+ fav_child = i;
+ }
+ }
+ }
+ return fav_child;
+}
- priv = this->private;
+/*
+ * afr_sh_fav_by_ctime: Choose favorite child by ctime.
+ */
+int
+afr_sh_fav_by_ctime(xlator_t *this, struct afr_reply *replies, inode_t *inode)
+{
+ afr_private_t *priv;
+ int fav_child = -1;
+ int i = 0;
+ uint32_t cmp_ctime = 0;
+ uint32_t cmp_ctime_nsec = 0;
+
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid == 1) {
+ gf_msg_debug(this->name, 0,
+ "Child:%s ctime = %" PRId64
+ ", ctime_nsec = %d for "
+ "gfid %s",
+ priv->children[i]->name, replies[i].poststat.ia_ctime,
+ replies[i].poststat.ia_ctime_nsec,
+ uuid_utoa(inode->gfid));
+ if (replies[i].poststat.ia_ctime > cmp_ctime) {
+ cmp_ctime = replies[i].poststat.ia_ctime;
+ cmp_ctime_nsec = replies[i].poststat.ia_ctime_nsec;
+ fav_child = i;
+ } else if ((replies[i].poststat.ia_ctime == cmp_ctime) &&
+ (replies[i].poststat.ia_ctime_nsec > cmp_ctime_nsec)) {
+ cmp_ctime = replies[i].poststat.ia_ctime;
+ cmp_ctime_nsec = replies[i].poststat.ia_ctime_nsec;
+ fav_child = i;
+ }
+ }
+ }
+ return fav_child;
+}
- memset (sinks, 0, sizeof (*sinks) * priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i] && locked_on[i])
- sinks[i] = 1;
+/*
+ * afr_sh_fav_by_size: Choose favorite child by size
+ * when not all files are of zero size.
+ */
+int
+afr_sh_fav_by_size(xlator_t *this, struct afr_reply *replies, inode_t *inode)
+{
+ afr_private_t *priv;
+ int fav_child = -1;
+ int i = 0;
+ uint64_t cmp_sz = 0;
+
+ priv = this->private;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid) {
+ continue;
+ }
+ gf_msg_debug(this->name, 0,
+ "Child:%s file size = %" PRIu64 " for gfid %s",
+ priv->children[i]->name, replies[i].poststat.ia_size,
+ uuid_utoa(inode->gfid));
+ if (replies[i].poststat.ia_type == IA_IFDIR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SBRAIN_FAV_CHILD_POLICY,
+ "Cannot perform selfheal on %s. "
+ "Size policy is not applicable to directories.",
+ uuid_utoa(inode->gfid));
+ break;
}
+ if (replies[i].poststat.ia_size > cmp_sz) {
+ cmp_sz = replies[i].poststat.ia_size;
+ fav_child = i;
+ } else if (replies[i].poststat.ia_size == cmp_sz) {
+ fav_child = -1;
+ }
+ }
+ if (fav_child == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "No bigger file");
+ }
+ return fav_child;
+}
+
+int
+afr_sh_get_fav_by_policy(xlator_t *this, struct afr_reply *replies,
+ inode_t *inode, char **policy_str)
+{
+ afr_private_t *priv = NULL;
+ int fav_child = -1;
+
+ priv = this->private;
+ if (!afr_can_decide_split_brain_source_sinks(replies, priv->child_count)) {
+ return -1;
+ }
+
+ switch (priv->fav_child_policy) {
+ case AFR_FAV_CHILD_BY_SIZE:
+ fav_child = afr_sh_fav_by_size(this, replies, inode);
+ if (policy_str && fav_child >= 0) {
+ *policy_str = "SIZE";
+ }
+ break;
+ case AFR_FAV_CHILD_BY_CTIME:
+ fav_child = afr_sh_fav_by_ctime(this, replies, inode);
+ if (policy_str && fav_child >= 0) {
+ *policy_str = "CTIME";
+ }
+ break;
+ case AFR_FAV_CHILD_BY_MTIME:
+ fav_child = afr_sh_fav_by_mtime(this, replies, inode);
+ if (policy_str && fav_child >= 0) {
+ *policy_str = "MTIME";
+ }
+ break;
+ case AFR_FAV_CHILD_BY_MAJORITY:
+ fav_child = afr_sh_fav_by_majority(this, replies, inode);
+ if (policy_str && fav_child >= 0) {
+ *policy_str = "MAJORITY";
+ }
+ break;
+ case AFR_FAV_CHILD_NONE:
+ default:
+ break;
+ }
+
+ return fav_child;
+}
+
+int
+afr_mark_split_brain_source_sinks_by_policy(
+ call_frame_t *frame, xlator_t *this, inode_t *inode, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks, unsigned char *locked_on,
+ struct afr_reply *replies, afr_transaction_type type)
+{
+ afr_private_t *priv = NULL;
+ int fav_child = -1;
+ char mtime_str[256];
+ char ctime_str[256];
+ char *policy_str = NULL;
+ struct tm *tm_ptr;
+ time_t time;
+
+ priv = this->private;
+
+ fav_child = afr_sh_get_fav_by_policy(this, replies, inode, &policy_str);
+ if (fav_child == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SBRAIN_FAV_CHILD_POLICY,
+ "No child selected by favorite-child policy.");
+ } else if (fav_child > priv->child_count - 1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SBRAIN_FAV_CHILD_POLICY,
+ "Invalid child (%d) "
+ "selected by policy %s.",
+ fav_child, policy_str);
+ } else if (fav_child >= 0) {
+ time = replies[fav_child].poststat.ia_mtime;
+ tm_ptr = localtime(&time);
+ strftime(mtime_str, sizeof(mtime_str), "%Y-%m-%d %H:%M:%S", tm_ptr);
+ time = replies[fav_child].poststat.ia_ctime;
+ tm_ptr = localtime(&time);
+ strftime(ctime_str, sizeof(ctime_str), "%Y-%m-%d %H:%M:%S", tm_ptr);
+
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SBRAIN_FAV_CHILD_POLICY,
+ "Source %s selected as authentic to resolve conflicting data "
+ "in file (gfid:%s) by %s (%" PRIu64
+ " bytes @ %s mtime, %s "
+ "ctime).",
+ priv->children[fav_child]->name, uuid_utoa(inode->gfid),
+ policy_str, replies[fav_child].poststat.ia_size, mtime_str,
+ ctime_str);
+
+ sources[fav_child] = 1;
+ sinks[fav_child] = 0;
+ healed_sinks[fav_child] = 0;
+ }
+ return fav_child;
}
gf_boolean_t
-afr_dict_contains_heal_op (call_frame_t *frame)
+afr_is_file_empty_on_all_children(afr_private_t *priv,
+ struct afr_reply *replies)
{
- afr_local_t *local = NULL;
- dict_t *xdata_req = NULL;
- int ret = 0;
- int heal_op = -1;
+ int i = 0;
- local = frame->local;
- xdata_req = local->xdata_req;
- ret = dict_get_int32 (xdata_req, "heal-op", &heal_op);
- if (ret)
- return _gf_false;
- if (local->xdata_rsp == NULL) {
- local->xdata_rsp = dict_new();
- if (!local->xdata_rsp)
- return _gf_true;
+ for (i = 0; i < priv->child_count; i++) {
+ if ((!replies[i].valid) || (replies[i].op_ret != 0) ||
+ (replies[i].poststat.ia_size != 0))
+ return _gf_false;
+ }
+
+ return _gf_true;
+}
+
+int
+afr_mark_source_sinks_if_file_empty(xlator_t *this, unsigned char *sources,
+ unsigned char *sinks,
+ unsigned char *healed_sinks,
+ unsigned char *locked_on,
+ struct afr_reply *replies,
+ afr_transaction_type type)
+{
+ int source = -1;
+ int i = 0;
+ afr_private_t *priv = this->private;
+ struct iatt stbuf = {
+ 0,
+ };
+
+ if ((AFR_COUNT(locked_on, priv->child_count) < priv->child_count) ||
+ (afr_success_count(replies, priv->child_count) < priv->child_count))
+ return -1;
+
+ if (type == AFR_DATA_TRANSACTION) {
+ if (!afr_is_file_empty_on_all_children(priv, replies))
+ return -1;
+ goto mark;
+ }
+
+ /* For AFR_METADATA_TRANSACTION, metadata must be the same on all bricks. */
+ stbuf = replies[0].poststat;
+ for (i = 1; i < priv->child_count; i++) {
+ if ((!IA_EQUAL(stbuf, replies[i].poststat, type)) ||
+ (!IA_EQUAL(stbuf, replies[i].poststat, uid)) ||
+ (!IA_EQUAL(stbuf, replies[i].poststat, gid)) ||
+ (!IA_EQUAL(stbuf, replies[i].poststat, prot)))
+ return -1;
+ }
+ for (i = 1; i < priv->child_count; i++) {
+ if (!afr_xattrs_are_equal(replies[0].xdata, replies[i].xdata))
+ return -1;
+ }
+
+mark:
+ /* Data/metadata is the same on all bricks. Pick one of them as source.
+ * The rest are sinks. */
+ for (i = 0; i < priv->child_count; i++) {
+ if (source == -1) {
+ source = i;
+ sources[i] = 1;
+ sinks[i] = 0;
+ healed_sinks[i] = 0;
+ continue;
}
- ret = dict_set_str (local->xdata_rsp, "sh-fail-msg",
- "File not in split-brain");
+ sources[i] = 0;
+ sinks[i] = 1;
+ healed_sinks[i] = 1;
+ }
- return _gf_true;
+ return source;
}
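`afr_mark_source_sinks_if_file_empty()` above is a shortcut: when all bricks are locked, every reply succeeded, and the copies are provably identical (empty files for a data heal, equal iatt and xattrs for a metadata heal), any brick can act as source, so the first one is picked and the rest become sinks. A sketch of that trivial selection with an assumed fixed replica count (`NCHILD` and `pick_trivial_source` are illustrative names):

```
#include <stdio.h>

#define NCHILD 3

static int
pick_trivial_source(int identical, unsigned char sources[NCHILD],
                    unsigned char sinks[NCHILD])
{
    if (!identical)
        return -1; /* fall back to the normal source/sink computation */

    for (int i = 0; i < NCHILD; i++) {
        sources[i] = (i == 0); /* first brick becomes the source */
        sinks[i] = (i != 0);   /* everyone else is a sink        */
    }
    return 0; /* index of the chosen source */
}

int
main(void)
{
    unsigned char sources[NCHILD] = {0};
    unsigned char sinks[NCHILD] = {0};
    printf("source: %d\n", pick_trivial_source(1, sources, sinks));
    return 0;
}
```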
/* Return a source depending on the type of heal_op, and set sources[source],
@@ -429,143 +1389,156 @@ afr_dict_contains_heal_op (call_frame_t *frame)
* sinks[node] are 1. This should be the case if the file is in split-brain.
*/
int
-afr_mark_split_brain_source_sinks (call_frame_t *frame, xlator_t *this,
- unsigned char *sources,
- unsigned char *sinks,
- unsigned char *healed_sinks,
- unsigned char *locked_on,
- struct afr_reply *replies,
- afr_transaction_type type)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- dict_t *xdata_req = NULL;
- dict_t *xdata_rsp = NULL;
- int ret = 0;
- int heal_op = -1;
- int i = 0;
- char *name = NULL;
- int source = -1;
-
- local = frame->local;
- priv = this->private;
- xdata_req = local->xdata_req;
-
- ret = dict_get_int32 (xdata_req, "heal-op", &heal_op);
- if (ret)
- goto out;
+afr_mark_split_brain_source_sinks(
+ call_frame_t *frame, xlator_t *this, inode_t *inode, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks, unsigned char *locked_on,
+ struct afr_reply *replies, afr_transaction_type type)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xdata_req = NULL;
+ int heal_op = -1;
+ int ret = -1;
+ int source = -1;
+
+ local = frame->local;
+ priv = this->private;
+ xdata_req = local->xdata_req;
+
+ source = afr_mark_source_sinks_if_file_empty(
+ this, sources, sinks, healed_sinks, locked_on, replies, type);
+ if (source >= 0)
+ return source;
- for (i = 0; i < priv->child_count; i++) {
- if (locked_on[i])
- if (sources[i] || !sinks[i] || !healed_sinks[i]) {
- ret = -1;
- goto out;
- }
- }
- if (local->xdata_rsp == NULL) {
- local->xdata_rsp = dict_new();
- if (!local->xdata_rsp) {
- ret = -1;
- goto out;
- }
+ ret = dict_get_int32_sizen(xdata_req, "heal-op", &heal_op);
+ if (ret)
+ goto autoheal;
+
+ source = afr_mark_split_brain_source_sinks_by_heal_op(
+ frame, this, sources, sinks, healed_sinks, locked_on, replies, type,
+ heal_op);
+ return source;
+
+autoheal:
+ /* Automatically heal if fav_child_policy is set. */
+ if (priv->fav_child_policy != AFR_FAV_CHILD_NONE) {
+ source = afr_mark_split_brain_source_sinks_by_policy(
+ frame, this, inode, sources, sinks, healed_sinks, locked_on,
+ replies, type);
+ if (source != -1) {
+ ret = dict_set_int32_sizen(xdata_req, "fav-child-policy", 1);
+ if (ret)
+ return -1;
}
- xdata_rsp = local->xdata_rsp;
+ }
- for (i = 0 ; i < priv->child_count; i++)
- if (locked_on[i])
- sources[i] = 1;
- switch (heal_op) {
- case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:
- if (type == AFR_METADATA_TRANSACTION) {
- ret = dict_set_str (xdata_rsp, "sh-fail-msg",
- "Use source-brick option to"
- " heal metadata split-brain");
- if (!ret)
- ret = -1;
- goto out;
- }
- afr_mark_largest_file_as_source (this, sources, replies);
- if (AFR_COUNT (sources, priv->child_count) != 1) {
- ret = dict_set_str (xdata_rsp, "sh-fail-msg",
- "No bigger file");
- if (!ret)
- ret = -1;
- goto out;
- }
- break;
- case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:
- if (type == AFR_METADATA_TRANSACTION) {
- ret = dict_set_str (xdata_rsp, "sh-fail-msg",
- "Use source-brick option to"
- " heal metadata split-brain");
- if (!ret)
- ret = -1;
- goto out;
- }
- afr_mark_latest_mtime_file_as_source (this, sources, replies);
- if (AFR_COUNT (sources, priv->child_count) != 1) {
- ret = dict_set_str (xdata_rsp, "sh-fail-msg",
- "No difference in mtime");
- if (!ret)
- ret = -1;
- goto out;
- }
- break;
- case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
- ret = dict_get_str (xdata_req, "child-name", &name);
- if (ret)
- goto out;
- source = afr_get_child_index_from_name (this, name);
- if (source < 0) {
- ret = dict_set_str (xdata_rsp, "sh-fail-msg",
- "Invalid brick name");
- if (!ret)
- ret = -1;
- goto out;
- }
- if (locked_on[source] != 1) {
- ret = dict_set_str (xdata_rsp, "sh-fail-msg",
- "Brick is not up");
- if (!ret)
- ret = -1;
- goto out;
- }
- memset (sources, 0, sizeof (*sources) * priv->child_count);
- sources[source] = 1;
- break;
- default:
- ret = -1;
- goto out;
- }
- for (i = 0 ; i < priv->child_count; i++) {
- if (sources[i]) {
- source = i;
- break;
- }
- }
- sinks[source] = 0;
- healed_sinks[source] = 0;
- ret = source;
-out:
- if (ret < 0)
- memset (sources, 0, sizeof (*sources) * priv->child_count);
- return ret;
+ return source;
+}
+
+int
+_afr_fav_child_reset_sink_xattrs(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, int source,
+ unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ afr_transaction_type type,
+ unsigned char *locked_on,
+ struct afr_reply *replies)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int *input_dirty = NULL;
+ int **input_matrix = NULL;
+ int *output_dirty = NULL;
+ int **output_matrix = NULL;
+ dict_t *xattr = NULL;
+ dict_t *xdata = NULL;
+ int i = 0;
+
+ priv = this->private;
+ local = frame->local;
+
+ if (!dict_get_sizen(local->xdata_req, "fav-child-policy"))
+ return 0;
+
+ xdata = dict_new();
+ if (!xdata)
+ return -1;
+
+ input_dirty = alloca0(priv->child_count * sizeof(int));
+ input_matrix = ALLOC_MATRIX(priv->child_count, int);
+ output_dirty = alloca0(priv->child_count * sizeof(int));
+ output_matrix = ALLOC_MATRIX(priv->child_count, int);
+
+ afr_selfheal_extract_xattr(this, replies, type, input_dirty, input_matrix);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (i == source || !healed_sinks[i])
+ continue;
+ output_dirty[i] = -input_dirty[i];
+ output_matrix[i][source] = -input_matrix[i][source];
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!healed_sinks[i] || !locked_on[i])
+ continue;
+ xattr = afr_selfheal_output_xattr(this, _gf_false, type, output_dirty,
+ output_matrix, i, NULL);
+
+ afr_selfheal_post_op(frame, this, inode, i, xattr, xdata);
+
+ undid_pending[i] = 1;
+ dict_unref(xattr);
+ }
+
+ if (xdata)
+ dict_unref(xdata);
+ return 0;
}
gf_boolean_t
-afr_does_witness_exist (xlator_t *this, uint64_t *witness)
+afr_does_witness_exist(xlator_t *this, uint64_t *witness)
{
- int i = 0;
- afr_private_t *priv = NULL;
+ int i = 0;
+ afr_private_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (witness[i])
- return _gf_true;
+ for (i = 0; i < priv->child_count; i++) {
+ if (witness[i])
+ return _gf_true;
+ }
+ return _gf_false;
+}
+
+unsigned int
+afr_get_quorum_count(afr_private_t *priv)
+{
+ if (priv->quorum_count == AFR_QUORUM_AUTO) {
+ return priv->child_count / 2 + 1;
+ } else {
+ return priv->quorum_count;
+ }
+}
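`afr_get_quorum_count()` above reads as: an "auto" quorum means strictly more than half of the bricks, otherwise the administrator-configured count is used verbatim. A tiny sketch (the `AFR_QUORUM_AUTO` value used here is an illustrative sentinel, not the real constant):

```
#include <stdio.h>

#define AFR_QUORUM_AUTO 0 /* illustrative sentinel for "auto" */

static unsigned int
quorum_count(unsigned int child_count, unsigned int configured)
{
    if (configured == AFR_QUORUM_AUTO)
        return child_count / 2 + 1; /* e.g. 2 of 3, 3 of 4 */
    return configured;
}

int
main(void)
{
    printf("auto quorum for replica 3: %u\n",
           quorum_count(3, AFR_QUORUM_AUTO));
    return 0;
}
```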
+
+void
+afr_selfheal_post_op_failure_accounting(afr_private_t *priv, char *accused,
+ unsigned char *sources,
+ unsigned char *locked_on)
+{
+ int i = 0;
+ unsigned int quorum_count = 0;
+
+ if (AFR_COUNT(sources, priv->child_count) != 0)
+ return;
+
+ quorum_count = afr_get_quorum_count(priv);
+ for (i = 0; i < priv->child_count; i++) {
+ if ((accused[i] < quorum_count) && locked_on[i]) {
+ sources[i] = 1;
}
- return _gf_false;
+ }
+ return;
}
/*
@@ -588,569 +1561,711 @@ afr_does_witness_exist (xlator_t *this, uint64_t *witness)
*/
int
-afr_selfheal_find_direction (call_frame_t *frame, xlator_t *this,
- struct afr_reply *replies,
- afr_transaction_type type,
- unsigned char *locked_on, unsigned char *sources,
- unsigned char *sinks, uint64_t *witness,
- gf_boolean_t *pflag)
-{
- afr_private_t *priv = NULL;
- int i = 0;
- int j = 0;
- int *dirty = NULL; /* Denotes if dirty xattr is set */
- int **matrix = NULL;/* Changelog matrix */
- char *accused = NULL;/* Accused others without any self-accusal */
- char *pending = NULL;/* Have pending operations on others */
- char *self_accused = NULL; /* Accused itself */
-
- priv = this->private;
-
- dirty = alloca0 (priv->child_count * sizeof (int));
- accused = alloca0 (priv->child_count);
- pending = alloca0 (priv->child_count);
- self_accused = alloca0 (priv->child_count);
- matrix = ALLOC_MATRIX(priv->child_count, int);
- memset (witness, 0, sizeof (*witness) * priv->child_count);
-
- /* First construct the pending matrix for further analysis */
- afr_selfheal_extract_xattr (this, replies, type, dirty, matrix);
-
- if (pflag) {
- for (i = 0; i < priv->child_count; i++) {
- for (j = 0; j < priv->child_count; j++)
- if (matrix[i][j])
- *pflag = _gf_true;
- if (*pflag)
- break;
- }
+afr_selfheal_find_direction(call_frame_t *frame, xlator_t *this,
+ struct afr_reply *replies,
+ afr_transaction_type type, unsigned char *locked_on,
+ unsigned char *sources, unsigned char *sinks,
+ uint64_t *witness, unsigned char *pflag)
+{
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int j = 0;
+ int *dirty = NULL; /* Denotes if dirty xattr is set */
+ int **matrix = NULL; /* Changelog matrix */
+ char *accused = NULL; /* Accused others without any self-accusal */
+ char *pending = NULL; /* Have pending operations on others */
+ char *self_accused = NULL; /* Accused itself */
+
+ priv = this->private;
+
+ dirty = alloca0(priv->child_count * sizeof(int));
+ accused = alloca0(priv->child_count);
+ pending = alloca0(priv->child_count);
+ self_accused = alloca0(priv->child_count);
+ matrix = ALLOC_MATRIX(priv->child_count, int);
+ memset(witness, 0, sizeof(*witness) * priv->child_count);
+
+ /* First construct the pending matrix for further analysis */
+ afr_selfheal_extract_xattr(this, replies, type, dirty, matrix);
+
+ if (pflag) {
+ for (i = 0; i < priv->child_count; i++) {
+ for (j = 0; j < priv->child_count; j++)
+ if (matrix[i][j])
+ *pflag |= PFLAG_PENDING;
+ if (*pflag)
+ break;
}
-
- if (afr_success_count (replies,
- priv->child_count) < AFR_SH_MIN_PARTICIPANTS) {
- /* Treat this just like locks not being acquired */
- return -ENOTCONN;
+ }
+
+ if (afr_success_count(replies, priv->child_count) < priv->child_count) {
+ /* Treat this just like locks not being acquired */
+ return -ENOTCONN;
+ }
+
+ /* short list all self-accused */
+ for (i = 0; i < priv->child_count; i++) {
+ if (matrix[i][i])
+ self_accused[i] = 1;
+ }
+
+ /* Next short list all accused to exclude them from being sources */
+ /* Self-accused can't accuse others as they are FOOLs */
+ for (i = 0; i < priv->child_count; i++) {
+ for (j = 0; j < priv->child_count; j++) {
+ if (matrix[i][j]) {
+ if (!self_accused[i])
+ accused[j] += 1;
+ if (i != j)
+ pending[i] += 1;
+ }
}
+ }
- /* short list all self-accused */
- for (i = 0; i < priv->child_count; i++) {
- if (matrix[i][i])
- self_accused[i] = 1;
+ /* Short list all non-accused as sources */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!accused[i] && locked_on[i])
+ sources[i] = 1;
+ else
+ sources[i] = 0;
+ }
+
+ /* Everyone accused by non-self-accused sources are sinks */
+ memset(sinks, 0, priv->child_count);
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ if (self_accused[i])
+ continue;
+ for (j = 0; j < priv->child_count; j++) {
+ if (matrix[i][j])
+ sinks[j] = 1;
}
-
- /* Next short list all accused to exclude them from being sources */
- /* Self-accused can't accuse others as they are FOOLs */
- for (i = 0; i < priv->child_count; i++) {
- for (j = 0; j < priv->child_count; j++) {
- if (matrix[i][j]) {
- if (!self_accused[i])
- accused[j] = 1;
-
- if (i != j)
- pending[i] = 1;
- }
- }
- }
-
- /* Short list all non-accused as sources */
- memset (sources, 0, priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- if (!accused[i] && locked_on[i])
- sources[i] = 1;
- }
-
- /* Everyone accused by non-self-accused sources are sinks */
- memset (sinks, 0, priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- if (self_accused[i])
- continue;
- for (j = 0; j < priv->child_count; j++) {
- if (matrix[i][j])
- sinks[j] = 1;
- }
+ }
+
+ /* For breaking ties, record the number of fops each brick witnessed */
+
+ /*
+ * count the pending fops witnessed from itself to others when it is
+ * self-accused
+ */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!self_accused[i])
+ continue;
+ for (j = 0; j < priv->child_count; j++) {
+ if (i == j)
+ continue;
+ witness[i] += matrix[i][j];
}
+ }
- /* For breaking ties provide with number of fops they witnessed */
+ if (type == AFR_DATA_TRANSACTION || type == AFR_METADATA_TRANSACTION)
+ afr_selfheal_post_op_failure_accounting(priv, accused, sources,
+ locked_on);
- /*
- * count the pending fops witnessed from itself to others when it is
- * self-accused
- */
+ /* If no sources, all locked nodes are sinks - split brain */
+ if (AFR_COUNT(sources, priv->child_count) == 0) {
for (i = 0; i < priv->child_count; i++) {
- if (!self_accused[i])
- continue;
- for (j = 0; j < priv->child_count; j++) {
- if (i == j)
- continue;
- witness[i] += matrix[i][j];
- }
+ if (locked_on[i])
+ sinks[i] = 1;
}
-
- /* If no sources, all locked nodes are sinks - split brain */
- if (AFR_COUNT (sources, priv->child_count) == 0) {
- for (i = 0; i < priv->child_count; i++) {
- if (locked_on[i])
- sinks[i] = 1;
- }
+ if (pflag)
+ *pflag |= PFLAG_SBRAIN;
+ }
+
+ /* One more class of witness, similar to 'dirty' in afr-v2, is when no
+ * pending markers exist but there are self-accusing markers. This can
+ * happen in afr-v1 if the brick crashes after the pre-op xattrop has
+ * updated its own changelog xattr but before it updates the other
+ * bricks' changelog xattrs on that brick. */
+ if (AFR_COUNT(pending, priv->child_count) == 0) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (self_accused[i])
+ witness[i] += matrix[i][i];
}
-
- /* In afr-v1 if a file is self-accused but didn't have any pending
+ } else {
+ /* In afr-v1 if a file is self-accused and has pending
* operations on others then it is similar to 'dirty' in afr-v2.
* Consider such cases as witness.
*/
for (i = 0; i < priv->child_count; i++) {
- if (self_accused[i] && !pending[i])
- witness[i] += matrix[i][i];
+ if (self_accused[i] && pending[i])
+ witness[i] += matrix[i][i];
}
+ }
- /* count the number of dirty fops witnessed */
- for (i = 0; i < priv->child_count; i++)
- witness[i] += dirty[i];
+ /* count the number of dirty fops witnessed */
+ for (i = 0; i < priv->child_count; i++)
+ witness[i] += dirty[i];
- return 0;
+ return 0;
}
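+/* Example: in a replica-3 volume where only matrix[0][2] and matrix[1][2] are
+ * set (bricks 0 and 1 blame brick 2, nobody blames itself) and locks were
+ * granted on all three bricks, afr_selfheal_find_direction() picks bricks 0
+ * and 1 as sources and brick 2 as the only sink. */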
void
-afr_log_selfheal (uuid_t gfid, xlator_t *this, int ret, char *type,
- int source, unsigned char *healed_sinks)
-{
- char *status = NULL;
- char *sinks_str = NULL;
- char *p = NULL;
- afr_private_t *priv = NULL;
- gf_loglevel_t loglevel = GF_LOG_NONE;
- int i = 0;
-
- priv = this->private;
- sinks_str = alloca0 (priv->child_count * 8);
- p = sinks_str;
- for (i = 0; i < priv->child_count; i++) {
- if (!healed_sinks[i])
- continue;
- p += sprintf (p, "%d ", i);
- }
-
- if (ret < 0) {
- status = "Failed";
- loglevel = GF_LOG_DEBUG;
- } else {
- status = "Completed";
- loglevel = GF_LOG_INFO;
+afr_log_selfheal(uuid_t gfid, xlator_t *this, int ret, char *type, int source,
+ unsigned char *sources, unsigned char *healed_sinks)
+{
+ char *status = NULL;
+ char *sinks_str = NULL;
+ char *p = NULL;
+ char *sources_str = NULL;
+ char *q = NULL;
+ afr_private_t *priv = NULL;
+ gf_loglevel_t loglevel = GF_LOG_NONE;
+ int i = 0;
+
+ priv = this->private;
+ sinks_str = alloca0(priv->child_count * 8);
+ p = sinks_str;
+ sources_str = alloca0(priv->child_count * 8);
+ q = sources_str;
+ for (i = 0; i < priv->child_count; i++) {
+ if (healed_sinks[i])
+ p += sprintf(p, "%d ", i);
+ if (sources[i]) {
+ if (source == i) {
+ q += sprintf(q, "[%d] ", i);
+ } else {
+ q += sprintf(q, "%d ", i);
+ }
}
-
- gf_msg (this->name, loglevel, 0,
- AFR_MSG_SELF_HEAL_INFO, "%s %s selfheal on %s. "
- "source=%d sinks=%s", status, type, uuid_utoa (gfid),
- source, sinks_str);
+ }
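+ /* sources_str lists every source brick, with the one actually used for
+ * the heal wrapped in brackets, e.g. "sources=[0] 2" when bricks 0 and 2
+ * are sources and brick 0 was chosen. */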
+
+ if (ret < 0) {
+ status = "Failed";
+ loglevel = GF_LOG_DEBUG;
+ } else {
+ status = "Completed";
+ loglevel = GF_LOG_INFO;
+ }
+
+ gf_msg(this->name, loglevel, 0, AFR_MSG_SELF_HEAL_INFO,
+ "%s %s selfheal on %s. "
+ "sources=%s sinks=%s",
+ status, type, uuid_utoa(gfid), sources_str, sinks_str);
}
int
-afr_selfheal_discover_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, inode_t *inode,
- struct iatt *buf, dict_t *xdata, struct iatt *parbuf)
-{
- afr_local_t *local = NULL;
- int i = -1;
- GF_UNUSED int ret = -1;
- int8_t need_heal = 1;
-
- local = frame->local;
- i = (long) cookie;
-
- local->replies[i].valid = 1;
- local->replies[i].op_ret = op_ret;
- local->replies[i].op_errno = op_errno;
- if (buf)
- local->replies[i].poststat = *buf;
- if (parbuf)
- local->replies[i].postparent = *parbuf;
- if (xdata) {
- local->replies[i].xdata = dict_ref (xdata);
- ret = dict_get_int8 (xdata, "link-count", &need_heal);
- local->replies[i].need_heal = need_heal;
- } else {
- local->replies[i].need_heal = need_heal;
- }
-
- syncbarrier_wake (&local->barrier);
-
- return 0;
+afr_selfheal_discover_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata, struct iatt *parbuf)
+{
+ afr_local_t *local = NULL;
+ int i = -1;
+ GF_UNUSED int ret = -1;
+ int8_t need_heal = 1;
+
+ local = frame->local;
+ i = (long)cookie;
+
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+ if (buf)
+ local->replies[i].poststat = *buf;
+ if (parbuf)
+ local->replies[i].postparent = *parbuf;
+ if (xdata) {
+ local->replies[i].xdata = dict_ref(xdata);
+ ret = dict_get_int8(xdata, "link-count", &need_heal);
+ }
+
+ local->replies[i].need_heal = need_heal;
+ syncbarrier_wake(&local->barrier);
+
+ return 0;
}
-
inode_t *
-afr_selfheal_unlocked_lookup_on (call_frame_t *frame, inode_t *parent,
- const char *name, struct afr_reply *replies,
- unsigned char *lookup_on, dict_t *xattr)
+afr_selfheal_unlocked_lookup_on(call_frame_t *frame, inode_t *parent,
+ const char *name, struct afr_reply *replies,
+ unsigned char *lookup_on, dict_t *xattr)
{
- loc_t loc = {0, };
- dict_t *xattr_req = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- inode_t *inode = NULL;
+ loc_t loc = {
+ 0,
+ };
+ dict_t *xattr_req = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ inode_t *inode = NULL;
+
+ local = frame->local;
+ priv = frame->this->private;
+
+ xattr_req = dict_new();
+ if (!xattr_req)
+ return NULL;
+
+ if (xattr)
+ dict_copy(xattr, xattr_req);
- local = frame->local;
- priv = frame->this->private;
+ if (afr_xattr_req_prepare(frame->this, xattr_req) != 0) {
+ dict_unref(xattr_req);
+ return NULL;
+ }
- xattr_req = dict_new ();
- if (!xattr_req)
- return NULL;
+ inode = inode_new(parent->table);
+ if (!inode) {
+ dict_unref(xattr_req);
+ return NULL;
+ }
- if (xattr)
- dict_copy (xattr, xattr_req);
+ loc.parent = inode_ref(parent);
+ gf_uuid_copy(loc.pargfid, parent->gfid);
+ loc.name = name;
+ loc.inode = inode_ref(inode);
- if (afr_xattr_req_prepare (frame->this, xattr_req) != 0) {
- dict_destroy (xattr_req);
- return NULL;
- }
+ AFR_ONLIST(lookup_on, frame, afr_selfheal_discover_cbk, lookup, &loc,
+ xattr_req);
- inode = inode_new (parent->table);
- if (!inode) {
- dict_destroy (xattr_req);
- return NULL;
- }
+ afr_replies_copy(replies, local->replies, priv->child_count);
- loc.parent = inode_ref (parent);
- gf_uuid_copy (loc.pargfid, parent->gfid);
- loc.name = name;
- loc.inode = inode_ref (inode);
+ loc_wipe(&loc);
+ dict_unref(xattr_req);
- AFR_ONLIST (lookup_on, frame, afr_selfheal_discover_cbk, lookup, &loc,
- xattr_req);
+ return inode;
+}
- afr_replies_copy (replies, local->replies, priv->child_count);
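+/* Request per-domain inodelk lock counts, for this xlator's transaction
+ * domain (this->name) and for the self-heal domain (priv->sh_domain), to be
+ * returned along with the lookup reply. */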
+static int
+afr_set_multi_dom_lock_count_request(xlator_t *this, dict_t *dict)
+{
+ int ret = 0;
+ afr_private_t *priv = NULL;
+ char *key1 = NULL;
+ char *key2 = NULL;
+
+ priv = this->private;
+ key1 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
+ strlen(this->name));
+ key2 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
+ strlen(priv->sh_domain));
+
+ ret = dict_set_uint32(dict, GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS, 1);
+ if (ret)
+ return ret;
- loc_wipe (&loc);
- dict_unref (xattr_req);
+ sprintf(key1, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, this->name);
+ ret = dict_set_uint32(dict, key1, 1);
+ if (ret)
+ return ret;
- return inode;
+ sprintf(key2, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, priv->sh_domain);
+ ret = dict_set_uint32(dict, key2, 1);
+ if (ret)
+ return ret;
+
+ return 0;
}
int
-afr_selfheal_unlocked_discover_on (call_frame_t *frame, inode_t *inode,
- uuid_t gfid, struct afr_reply *replies,
- unsigned char *discover_on)
+afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
+ uuid_t gfid, struct afr_reply *replies,
+ unsigned char *discover_on, dict_t *dict)
{
- loc_t loc = {0, };
- dict_t *xattr_req = NULL;
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
+ loc_t loc = {
+ 0,
+ };
+ dict_t *xattr_req = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ local = frame->local;
+ priv = frame->this->private;
- local = frame->local;
- priv = frame->this->private;
+ xattr_req = dict_new();
+ if (!xattr_req)
+ return -ENOMEM;
+ if (dict)
+ dict_copy(dict, xattr_req);
- xattr_req = dict_new ();
- if (!xattr_req)
- return -ENOMEM;
+ if (afr_xattr_req_prepare(frame->this, xattr_req) != 0) {
+ dict_unref(xattr_req);
+ return -ENOMEM;
+ }
- if (afr_xattr_req_prepare (frame->this, xattr_req) != 0) {
- dict_destroy (xattr_req);
- return -ENOMEM;
- }
+ if (afr_set_multi_dom_lock_count_request(frame->this, xattr_req)) {
+ dict_unref(xattr_req);
+ return -1;
+ }
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, gfid);
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, gfid);
- AFR_ONLIST (discover_on, frame, afr_selfheal_discover_cbk, lookup, &loc,
- xattr_req);
+ AFR_ONLIST(discover_on, frame, afr_selfheal_discover_cbk, lookup, &loc,
+ xattr_req);
- afr_replies_copy (replies, local->replies, priv->child_count);
+ afr_replies_copy(replies, local->replies, priv->child_count);
- loc_wipe (&loc);
- dict_unref (xattr_req);
+ loc_wipe(&loc);
+ dict_unref(xattr_req);
- return 0;
+ return 0;
}
int
-afr_selfheal_unlocked_discover (call_frame_t *frame, inode_t *inode,
- uuid_t gfid, struct afr_reply *replies)
+afr_selfheal_unlocked_discover(call_frame_t *frame, inode_t *inode, uuid_t gfid,
+ struct afr_reply *replies)
{
- afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ dict_t *dict = NULL;
- priv = frame->this->private;
+ local = frame->local;
- return afr_selfheal_unlocked_discover_on (frame, inode, gfid, replies,
- priv->child_up);
+ if (local->xattr_req)
+ dict = local->xattr_req;
+
+ return afr_selfheal_unlocked_discover_on(frame, inode, gfid, replies,
+ local->child_up, dict);
}
unsigned int
-afr_success_count (struct afr_reply *replies, unsigned int count)
+afr_success_count(struct afr_reply *replies, unsigned int count)
{
- int i = 0;
- unsigned int success = 0;
+ int i = 0;
+ unsigned int success = 0;
- for (i = 0; i < count; i++)
- if (replies[i].valid && replies[i].op_ret == 0)
- success++;
- return success;
+ for (i = 0; i < count; i++)
+ if (replies[i].valid && replies[i].op_ret == 0)
+ success++;
+ return success;
}
int
-afr_selfheal_lock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, dict_t *xdata)
+afr_selfheal_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, dict_t *xdata)
{
- afr_local_t *local = NULL;
- int i = 0;
+ afr_local_t *local = NULL;
+ int i = 0;
- local = frame->local;
- i = (long) cookie;
+ local = frame->local;
+ i = (long)cookie;
- local->replies[i].valid = 1;
- local->replies[i].op_ret = op_ret;
- local->replies[i].op_errno = op_errno;
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
- syncbarrier_wake (&local->barrier);
+ syncbarrier_wake(&local->barrier);
- return 0;
+ return 0;
}
-
int
-afr_locked_fill (call_frame_t *frame, xlator_t *this,
- unsigned char *locked_on)
+afr_locked_fill(call_frame_t *frame, xlator_t *this, unsigned char *locked_on)
{
- int i = 0;
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int count = 0;
-
- local = frame->local;
- priv = this->private;
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->replies[i].valid && local->replies[i].op_ret == 0) {
- locked_on[i] = 1;
- count++;
- } else {
- locked_on[i] = 0;
- }
- }
+ int i = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int count = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].valid && local->replies[i].op_ret == 0) {
+ locked_on[i] = 1;
+ count++;
+ } else {
+ locked_on[i] = 0;
+ }
+ }
- return count;
+ return count;
}
-
int
-afr_selfheal_tryinodelk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, off_t off, size_t size,
- unsigned char *locked_on)
+afr_selfheal_tryinodelk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, off_t off, size_t size,
+ unsigned char *locked_on)
{
- loc_t loc = {0,};
- struct gf_flock flock = {0, };
+ loc_t loc = {
+ 0,
+ };
+ struct gf_flock flock = {
+ 0,
+ };
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- flock.l_type = F_WRLCK;
- flock.l_start = off;
- flock.l_len = size;
+ flock.l_type = F_WRLCK;
+ flock.l_start = off;
+ flock.l_len = size;
- AFR_ONALL (frame, afr_selfheal_lock_cbk, inodelk, dom,
- &loc, F_SETLK, &flock, NULL);
+ AFR_ONALL(frame, afr_selfheal_lock_cbk, inodelk, dom, &loc, F_SETLK, &flock,
+ NULL);
- loc_wipe (&loc);
+ loc_wipe(&loc);
- return afr_locked_fill (frame, this, locked_on);
+ return afr_locked_fill(frame, this, locked_on);
}
-
int
-afr_selfheal_inodelk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, off_t off, size_t size,
- unsigned char *locked_on)
+afr_selfheal_inodelk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, off_t off, size_t size,
+ unsigned char *locked_on)
{
- loc_t loc = {0,};
- struct gf_flock flock = {0, };
- afr_local_t *local = NULL;
- int i = 0;
- afr_private_t *priv = NULL;
-
- priv = this->private;
- local = frame->local;
-
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
-
- flock.l_type = F_WRLCK;
- flock.l_start = off;
- flock.l_len = size;
-
- AFR_ONALL (frame, afr_selfheal_lock_cbk, inodelk, dom,
- &loc, F_SETLK, &flock, NULL);
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->replies[i].op_ret == -1 &&
- local->replies[i].op_errno == EAGAIN) {
- afr_locked_fill (frame, this, locked_on);
- afr_selfheal_uninodelk (frame, this, inode, dom, off,
- size, locked_on);
-
- AFR_SEQ (frame, afr_selfheal_lock_cbk, inodelk, dom,
- &loc, F_SETLKW, &flock, NULL);
- break;
- }
- }
+ loc_t loc = {
+ 0,
+ };
+ struct gf_flock flock = {
+ 0,
+ };
+ afr_local_t *local = NULL;
+ int i = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
+
+ flock.l_type = F_WRLCK;
+ flock.l_start = off;
+ flock.l_len = size;
+
+ AFR_ONALL(frame, afr_selfheal_lock_cbk, inodelk, dom, &loc, F_SETLK, &flock,
+ NULL);
+
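+ /* If any brick replied EAGAIN to the non-blocking attempt, release the
+ * locks that were granted and retry with blocking locks, winding the
+ * lock requests serially via AFR_SEQ. */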
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].op_ret == -1 &&
+ local->replies[i].op_errno == EAGAIN) {
+ afr_locked_fill(frame, this, locked_on);
+ afr_selfheal_uninodelk(frame, this, inode, dom, off, size,
+ locked_on);
+
+ AFR_SEQ(frame, afr_selfheal_lock_cbk, inodelk, dom, &loc, F_SETLKW,
+ &flock, NULL);
+ break;
+ }
+ }
- loc_wipe (&loc);
+ loc_wipe(&loc);
- return afr_locked_fill (frame, this, locked_on);
+ return afr_locked_fill(frame, this, locked_on);
}
+static void
+afr_get_lock_and_eagain_counts(afr_private_t *priv, struct afr_reply *replies,
+ int *lock_count, int *eagain_count)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+ if (replies[i].op_ret == 0) {
+ (*lock_count)++;
+ } else if (replies[i].op_ret == -1 && replies[i].op_errno == EAGAIN) {
+ (*eagain_count)++;
+ }
+ }
+}
+/* Take blocking locks if the number of non-blocking locks acquired is a
+ * majority and some bricks returned EAGAIN. Useful for replication with an
+ * odd number of bricks: e.g. in replica 3, two granted locks and one EAGAIN
+ * lead to the granted locks being released and blocking locks being taken
+ * instead. */
int
-afr_selfheal_uninodelk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, off_t off, size_t size,
- const unsigned char *locked_on)
+afr_selfheal_tie_breaker_inodelk(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, char *dom, off_t off,
+ size_t size, unsigned char *locked_on)
{
- loc_t loc = {0,};
- struct gf_flock flock = {0, };
+ loc_t loc = {
+ 0,
+ };
+ struct gf_flock flock = {
+ 0,
+ };
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int lock_count = 0;
+ int eagain_count = 0;
+ priv = this->private;
+ local = frame->local;
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- flock.l_type = F_UNLCK;
- flock.l_start = off;
- flock.l_len = size;
+ flock.l_type = F_WRLCK;
+ flock.l_start = off;
+ flock.l_len = size;
- AFR_ONLIST (locked_on, frame, afr_selfheal_lock_cbk, inodelk,
- dom, &loc, F_SETLK, &flock, NULL);
+ AFR_ONALL(frame, afr_selfheal_lock_cbk, inodelk, dom, &loc, F_SETLK, &flock,
+ NULL);
- loc_wipe (&loc);
+ afr_get_lock_and_eagain_counts(priv, local->replies, &lock_count,
+ &eagain_count);
- return 0;
-}
+ if (lock_count > priv->child_count / 2 && eagain_count) {
+ afr_locked_fill(frame, this, locked_on);
+ afr_selfheal_uninodelk(frame, this, inode, dom, off, size, locked_on);
+
+ AFR_SEQ(frame, afr_selfheal_lock_cbk, inodelk, dom, &loc, F_SETLKW,
+ &flock, NULL);
+ }
+
+ loc_wipe(&loc);
+ return afr_locked_fill(frame, this, locked_on);
+}
int
-afr_selfheal_tryentrylk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, const char *name, unsigned char *locked_on)
+afr_selfheal_uninodelk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, off_t off, size_t size,
+ const unsigned char *locked_on)
{
- loc_t loc = {0,};
+ loc_t loc = {
+ 0,
+ };
+ struct gf_flock flock = {
+ 0,
+ };
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- AFR_ONALL (frame, afr_selfheal_lock_cbk, entrylk, dom,
- &loc, name, ENTRYLK_LOCK_NB, ENTRYLK_WRLCK, NULL);
+ flock.l_type = F_UNLCK;
+ flock.l_start = off;
+ flock.l_len = size;
- loc_wipe (&loc);
+ AFR_ONLIST(locked_on, frame, afr_selfheal_lock_cbk, inodelk, dom, &loc,
+ F_SETLK, &flock, NULL);
- return afr_locked_fill (frame, this, locked_on);
-}
+ loc_wipe(&loc);
+ return 0;
+}
int
-afr_selfheal_entrylk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, const char *name, unsigned char *locked_on)
+afr_selfheal_tryentrylk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, const char *name, unsigned char *locked_on)
{
- loc_t loc = {0,};
- afr_local_t *local = NULL;
- int i = 0;
- afr_private_t *priv = NULL;
+ loc_t loc = {
+ 0,
+ };
- priv = this->private;
- local = frame->local;
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
+ AFR_ONALL(frame, afr_selfheal_lock_cbk, entrylk, dom, &loc, name,
+ ENTRYLK_LOCK_NB, ENTRYLK_WRLCK, NULL);
- AFR_ONALL (frame, afr_selfheal_lock_cbk, entrylk, dom, &loc,
- name, ENTRYLK_LOCK_NB, ENTRYLK_WRLCK, NULL);
+ loc_wipe(&loc);
- for (i = 0; i < priv->child_count; i++) {
- if (local->replies[i].op_ret == -1 &&
- local->replies[i].op_errno == EAGAIN) {
- afr_locked_fill (frame, this, locked_on);
- afr_selfheal_unentrylk (frame, this, inode, dom, name,
- locked_on);
+ return afr_locked_fill(frame, this, locked_on);
+}
- AFR_SEQ (frame, afr_selfheal_lock_cbk, entrylk, dom,
- &loc, name, ENTRYLK_LOCK, ENTRYLK_WRLCK, NULL);
- break;
- }
- }
+int
+afr_selfheal_entrylk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, const char *name, unsigned char *locked_on)
+{
+ loc_t loc = {
+ 0,
+ };
+ afr_local_t *local = NULL;
+ int i = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
+
+ AFR_ONALL(frame, afr_selfheal_lock_cbk, entrylk, dom, &loc, name,
+ ENTRYLK_LOCK_NB, ENTRYLK_WRLCK, NULL);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].op_ret == -1 &&
+ local->replies[i].op_errno == EAGAIN) {
+ afr_locked_fill(frame, this, locked_on);
+ afr_selfheal_unentrylk(frame, this, inode, dom, name, locked_on,
+ NULL);
+
+ AFR_SEQ(frame, afr_selfheal_lock_cbk, entrylk, dom, &loc, name,
+ ENTRYLK_LOCK, ENTRYLK_WRLCK, NULL);
+ break;
+ }
+ }
- loc_wipe (&loc);
+ loc_wipe(&loc);
- return afr_locked_fill (frame, this, locked_on);
+ return afr_locked_fill(frame, this, locked_on);
}
-
int
-afr_selfheal_unentrylk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, const char *name, unsigned char *locked_on)
+afr_selfheal_tie_breaker_entrylk(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, char *dom, const char *name,
+ unsigned char *locked_on)
{
- loc_t loc = {0,};
+ loc_t loc = {
+ 0,
+ };
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int lock_count = 0;
+ int eagain_count = 0;
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
+ priv = this->private;
+ local = frame->local;
- AFR_ONLIST (locked_on, frame, afr_selfheal_lock_cbk, entrylk,
- dom, &loc, name, ENTRYLK_UNLOCK, ENTRYLK_WRLCK, NULL);
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- loc_wipe (&loc);
+ AFR_ONALL(frame, afr_selfheal_lock_cbk, entrylk, dom, &loc, name,
+ ENTRYLK_LOCK_NB, ENTRYLK_WRLCK, NULL);
- return 0;
-}
+ afr_get_lock_and_eagain_counts(priv, local->replies, &lock_count,
+ &eagain_count);
+ if (lock_count > priv->child_count / 2 && eagain_count) {
+ afr_locked_fill(frame, this, locked_on);
+ afr_selfheal_unentrylk(frame, this, inode, dom, name, locked_on, NULL);
-gf_boolean_t
-afr_is_pending_set (xlator_t *this, dict_t *xdata, int type)
-{
- int idx = -1;
- afr_private_t *priv = NULL;
- void *pending_raw = NULL;
- int *pending_int = NULL;
- int i = 0;
+ AFR_SEQ(frame, afr_selfheal_lock_cbk, entrylk, dom, &loc, name,
+ ENTRYLK_LOCK, ENTRYLK_WRLCK, NULL);
+ }
- priv = this->private;
- idx = afr_index_for_transaction_type (type);
+ loc_wipe(&loc);
- if (dict_get_ptr (xdata, AFR_DIRTY, &pending_raw) == 0) {
- if (pending_raw) {
- pending_int = pending_raw;
+ return afr_locked_fill(frame, this, locked_on);
+}
- if (ntoh32 (pending_int[idx]))
- return _gf_true;
- }
- }
+int
+afr_selfheal_unentrylk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, const char *name, unsigned char *locked_on,
+ dict_t *xdata)
+{
+ loc_t loc = {
+ 0,
+ };
- for (i = 0; i < priv->child_count; i++) {
- if (dict_get_ptr (xdata, priv->pending_key[i],
- &pending_raw))
- continue;
- if (!pending_raw)
- continue;
- pending_int = pending_raw;
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
- if (ntoh32 (pending_int[idx]))
- return _gf_true;
- }
+ AFR_ONLIST(locked_on, frame, afr_selfheal_lock_cbk, entrylk, dom, &loc,
+ name, ENTRYLK_UNLOCK, ENTRYLK_WRLCK, xdata);
- return _gf_false;
-}
+ loc_wipe(&loc);
+ return 0;
+}
gf_boolean_t
-afr_is_data_set (xlator_t *this, dict_t *xdata)
+afr_is_data_set(xlator_t *this, dict_t *xdata)
{
- return afr_is_pending_set (this, xdata, AFR_DATA_TRANSACTION);
+ return afr_is_pending_set(this, xdata, AFR_DATA_TRANSACTION);
}
gf_boolean_t
-afr_is_metadata_set (xlator_t *this, dict_t *xdata)
+afr_is_metadata_set(xlator_t *this, dict_t *xdata)
{
- return afr_is_pending_set (this, xdata, AFR_METADATA_TRANSACTION);
+ return afr_is_pending_set(this, xdata, AFR_METADATA_TRANSACTION);
}
gf_boolean_t
-afr_is_entry_set (xlator_t *this, dict_t *xdata)
+afr_is_entry_set(xlator_t *this, dict_t *xdata)
{
- return afr_is_pending_set (this, xdata, AFR_ENTRY_TRANSACTION);
+ return afr_is_pending_set(this, xdata, AFR_ENTRY_TRANSACTION);
}
/*
@@ -1163,286 +2278,310 @@ afr_is_entry_set (xlator_t *this, dict_t *xdata)
*/
int
-afr_selfheal_unlocked_inspect (call_frame_t *frame, xlator_t *this,
- uuid_t gfid, inode_t **link_inode,
- gf_boolean_t *data_selfheal,
- gf_boolean_t *metadata_selfheal,
- gf_boolean_t *entry_selfheal)
-{
- afr_private_t *priv = NULL;
- inode_t *inode = NULL;
- int i = 0;
- int valid_cnt = 0;
- struct iatt first = {0, };
- struct afr_reply *replies = NULL;
- int ret = -1;
-
- priv = this->private;
-
- inode = afr_inode_find (this, gfid);
- if (!inode)
- goto out;
+afr_selfheal_unlocked_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+ inode_t **link_inode, gf_boolean_t *data_selfheal,
+ gf_boolean_t *metadata_selfheal,
+ gf_boolean_t *entry_selfheal,
+ struct afr_reply *replies_dst)
+{
+ afr_private_t *priv = NULL;
+ inode_t *inode = NULL;
+ int i = 0;
+ int valid_cnt = 0;
+ struct iatt first = {
+ 0,
+ };
+ int first_idx = 0;
+ struct afr_reply *replies = NULL;
+ int ret = -1;
+
+ priv = this->private;
+
+ inode = afr_inode_find(this, gfid);
+ if (!inode)
+ goto out;
+
+ replies = alloca0(sizeof(*replies) * priv->child_count);
+
+ ret = afr_selfheal_unlocked_discover(frame, inode, gfid, replies);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+ if (replies[i].op_ret == -1)
+ continue;
+
+ /* The data segment of the changelog can be non-zero to indicate
+ * the directory needs a full heal. So the check below ensures
+ * it's not a directory before setting the data_selfheal boolean.
+ */
+ if (data_selfheal && !IA_ISDIR(replies[i].poststat.ia_type) &&
+ afr_is_data_set(this, replies[i].xdata))
+ *data_selfheal = _gf_true;
- replies = alloca0 (sizeof (*replies) * priv->child_count);
+ if (metadata_selfheal && afr_is_metadata_set(this, replies[i].xdata))
+ *metadata_selfheal = _gf_true;
- ret = afr_selfheal_unlocked_discover (frame, inode, gfid, replies);
- if (ret)
- goto out;
+ if (entry_selfheal && afr_is_entry_set(this, replies[i].xdata))
+ *entry_selfheal = _gf_true;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
- if (replies[i].op_ret == -1)
- continue;
-
- if (data_selfheal && afr_is_data_set (this, replies[i].xdata))
- *data_selfheal = _gf_true;
-
- if (metadata_selfheal &&
- afr_is_metadata_set (this, replies[i].xdata))
- *metadata_selfheal = _gf_true;
-
- if (entry_selfheal && afr_is_entry_set (this, replies[i].xdata))
- *entry_selfheal = _gf_true;
-
- valid_cnt ++;
- if (valid_cnt == 1) {
- first = replies[i].poststat;
- continue;
- }
-
- if (!IA_EQUAL (first, replies[i].poststat, type)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SPLIT_BRAIN,
- "TYPE mismatch %d vs %d on %s for gfid:%s",
- (int) first.ia_type,
- (int) replies[i].poststat.ia_type,
- priv->children[i]->name,
- uuid_utoa (replies[i].poststat.ia_gfid));
- ret = -EIO;
- goto out;
- }
-
- if (!IA_EQUAL (first, replies[i].poststat, uid)) {
- gf_msg_debug (this->name, 0,
- "UID mismatch "
- "%d vs %d on %s for gfid:%s",
- (int) first.ia_uid,
- (int) replies[i].poststat.ia_uid,
- priv->children[i]->name,
- uuid_utoa (replies[i].poststat.ia_gfid));
-
- if (metadata_selfheal)
- *metadata_selfheal = _gf_true;
- }
-
- if (!IA_EQUAL (first, replies[i].poststat, gid)) {
- gf_msg_debug (this->name, 0,
- "GID mismatch "
- "%d vs %d on %s for gfid:%s",
- (int) first.ia_uid,
- (int) replies[i].poststat.ia_uid,
- priv->children[i]->name,
- uuid_utoa (replies[i].poststat.ia_gfid));
-
- if (metadata_selfheal)
- *metadata_selfheal = _gf_true;
- }
-
- if (!IA_EQUAL (first, replies[i].poststat, prot)) {
- gf_msg_debug (this->name, 0,
- "MODE mismatch "
- "%d vs %d on %s for gfid:%s",
- (int) st_mode_from_ia (first.ia_prot, 0),
- (int) st_mode_from_ia
- (replies[i].poststat.ia_prot, 0),
- priv->children[i]->name,
- uuid_utoa (replies[i].poststat.ia_gfid));
-
- if (metadata_selfheal)
- *metadata_selfheal = _gf_true;
- }
-
- if (IA_ISREG(first.ia_type) &&
- !IA_EQUAL (first, replies[i].poststat, size)) {
- gf_msg_debug (this->name, 0,
- "SIZE mismatch "
- "%lld vs %lld on %s for gfid:%s",
- (long long) first.ia_size,
- (long long) replies[i].poststat.ia_size,
- priv->children[i]->name,
- uuid_utoa (replies[i].poststat.ia_gfid));
-
- if (data_selfheal)
- *data_selfheal = _gf_true;
- }
- }
-
- if (valid_cnt > 0 && link_inode) {
- *link_inode = inode_link (inode, NULL, NULL, &first);
- if (!*link_inode) {
- ret = -EINVAL;
- goto out;
- }
- } else if (valid_cnt < 2) {
- ret = afr_check_stale_error (replies, priv);
- goto out;
+ valid_cnt++;
+ if (valid_cnt == 1) {
+ first = replies[i].poststat;
+ first_idx = i;
+ continue;
}
- ret = 0;
-out:
- if (inode)
- inode_unref (inode);
- if (replies)
- afr_replies_wipe (replies, priv->child_count);
+ if (!IA_EQUAL(first, replies[i].poststat, type)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "TYPE mismatch %d vs %d on %s for gfid:%s",
+ (int)first.ia_type, (int)replies[i].poststat.ia_type,
+ priv->children[i]->name,
+ uuid_utoa(replies[i].poststat.ia_gfid));
+ gf_event(EVENT_AFR_SPLIT_BRAIN,
+ "client-pid=%d;"
+ "subvol=%s;"
+ "type=file;gfid=%s;"
+ "ia_type-%d=%s;ia_type-%d=%s",
+ this->ctx->cmd_args.client_pid, this->name,
+ uuid_utoa(replies[i].poststat.ia_gfid), first_idx,
+ gf_inode_type_to_str(first.ia_type), i,
+ gf_inode_type_to_str(replies[i].poststat.ia_type));
+ ret = -EIO;
+ goto out;
+ }
- return ret;
-}
+ if (!IA_EQUAL(first, replies[i].poststat, uid)) {
+ gf_msg_debug(this->name, 0,
+ "UID mismatch "
+ "%d vs %d on %s for gfid:%s",
+ (int)first.ia_uid, (int)replies[i].poststat.ia_uid,
+ priv->children[i]->name,
+ uuid_utoa(replies[i].poststat.ia_gfid));
+
+ if (metadata_selfheal)
+ *metadata_selfheal = _gf_true;
+ }
+
+ if (!IA_EQUAL(first, replies[i].poststat, gid)) {
+ gf_msg_debug(this->name, 0,
+ "GID mismatch "
+ "%d vs %d on %s for gfid:%s",
+ (int)first.ia_uid, (int)replies[i].poststat.ia_uid,
+ priv->children[i]->name,
+ uuid_utoa(replies[i].poststat.ia_gfid));
+ if (metadata_selfheal)
+ *metadata_selfheal = _gf_true;
+ }
+
+ if (!IA_EQUAL(first, replies[i].poststat, prot)) {
+ gf_msg_debug(this->name, 0,
+ "MODE mismatch "
+ "%d vs %d on %s for gfid:%s",
+ (int)st_mode_from_ia(first.ia_prot, 0),
+ (int)st_mode_from_ia(replies[i].poststat.ia_prot, 0),
+ priv->children[i]->name,
+ uuid_utoa(replies[i].poststat.ia_gfid));
+
+ if (metadata_selfheal)
+ *metadata_selfheal = _gf_true;
+ }
+
+ if (IA_ISREG(first.ia_type) &&
+ !IA_EQUAL(first, replies[i].poststat, size)) {
+ gf_msg_debug(this->name, 0,
+ "SIZE mismatch "
+ "%lld vs %lld on %s for gfid:%s",
+ (long long)first.ia_size,
+ (long long)replies[i].poststat.ia_size,
+ priv->children[i]->name,
+ uuid_utoa(replies[i].poststat.ia_gfid));
+
+ if (data_selfheal)
+ *data_selfheal = _gf_true;
+ }
+ }
+
+ if (valid_cnt > 0 && link_inode) {
+ *link_inode = inode_link(inode, NULL, NULL, &first);
+ if (!*link_inode) {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else if (valid_cnt < 2) {
+ ret = afr_check_stale_error(replies, priv);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (replies && replies_dst)
+ afr_replies_copy(replies_dst, replies, priv->child_count);
+ if (inode)
+ inode_unref(inode);
+ if (replies)
+ afr_replies_wipe(replies, priv->child_count);
+
+ return ret;
+}
inode_t *
-afr_inode_find (xlator_t *this, uuid_t gfid)
+afr_inode_find(xlator_t *this, uuid_t gfid)
{
- inode_table_t *table = NULL;
- inode_t *inode = NULL;
+ inode_table_t *table = NULL;
+ inode_t *inode = NULL;
- table = this->itable;
- if (!table)
- return NULL;
+ table = this->itable;
+ if (!table)
+ return NULL;
- inode = inode_find (table, gfid);
- if (inode)
- return inode;
+ inode = inode_find(table, gfid);
+ if (inode)
+ return inode;
- inode = inode_new (table);
- if (!inode)
- return NULL;
+ inode = inode_new(table);
+ if (!inode)
+ return NULL;
- gf_uuid_copy (inode->gfid, gfid);
+ gf_uuid_copy(inode->gfid, gfid);
- return inode;
+ return inode;
}
-
call_frame_t *
-afr_frame_create (xlator_t *this)
+afr_frame_create(xlator_t *this, int32_t *op_errno)
{
- call_frame_t *frame = NULL;
- afr_local_t *local = NULL;
- int op_errno = 0;
- pid_t pid = GF_CLIENT_PID_SELF_HEALD;
-
- frame = create_frame (this, this->ctx->pool);
- if (!frame)
- return NULL;
+ call_frame_t *frame = NULL;
+ afr_local_t *local = NULL;
+ pid_t pid = GF_CLIENT_PID_SELF_HEALD;
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ if (op_errno)
+ *op_errno = ENOMEM;
+ return NULL;
+ }
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local) {
- STACK_DESTROY (frame->root);
- return NULL;
- }
+ local = AFR_FRAME_INIT(frame, (*op_errno));
+ if (!local) {
+ STACK_DESTROY(frame->root);
+ return NULL;
+ }
- syncopctx_setfspid (&pid);
+ syncopctx_setfspid(&pid);
- frame->root->pid = pid;
+ frame->root->pid = pid;
- afr_set_lk_owner (frame, this, frame->root);
+ afr_set_lk_owner(frame, this, frame->root);
- return frame;
+ return frame;
}
int
-afr_selfheal_newentry_mark (call_frame_t *frame, xlator_t *this, inode_t *inode,
- int source, struct afr_reply *replies,
- unsigned char *sources, unsigned char *newentry)
+afr_selfheal_newentry_mark(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ int source, struct afr_reply *replies,
+ unsigned char *sources, unsigned char *newentry)
{
- int ret = 0;
- int i = 0;
- afr_private_t *priv = NULL;
- dict_t *xattr = NULL;
- int **changelog = NULL;
+ int ret = 0;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ dict_t *xattr = NULL;
+ int **changelog = NULL;
- priv = this->private;
+ priv = this->private;
- gf_uuid_copy (inode->gfid, replies[source].poststat.ia_gfid);
+ gf_uuid_copy(inode->gfid, replies[source].poststat.ia_gfid);
- xattr = dict_new();
- if (!xattr)
- return -ENOMEM;
+ xattr = dict_new();
+ if (!xattr)
+ return -ENOMEM;
- changelog = afr_mark_pending_changelog (priv, newentry, xattr,
- replies[source].poststat.ia_type);
+ changelog = afr_mark_pending_changelog(priv, newentry, xattr,
+ replies[source].poststat.ia_type);
- if (!changelog)
- goto out;
+ if (!changelog) {
+ ret = -ENOMEM;
+ goto out;
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- afr_selfheal_post_op (frame, this, inode, i, xattr);
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ ret |= afr_selfheal_post_op(frame, this, inode, i, xattr, NULL);
+ }
out:
- if (changelog)
- afr_matrix_cleanup (changelog, priv->child_count);
- if (xattr)
- dict_unref (xattr);
- return ret;
+ if (changelog)
+ afr_matrix_cleanup(changelog, priv->child_count);
+ if (xattr)
+ dict_unref(xattr);
+ return ret;
}
int
-afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid)
-{
- int ret = -1;
- int entry_ret = 1;
- int metadata_ret = 1;
- int data_ret = 1;
- int or_ret = 0;
- inode_t *inode = NULL;
- gf_boolean_t data_selfheal = _gf_false;
- gf_boolean_t metadata_selfheal = _gf_false;
- gf_boolean_t entry_selfheal = _gf_false;
- afr_private_t *priv = NULL;
- gf_boolean_t dataheal_enabled = _gf_false;
-
- priv = this->private;
- gf_string2boolean (priv->data_self_heal, &dataheal_enabled);
-
- ret = afr_selfheal_unlocked_inspect (frame, this, gfid, &inode,
- &data_selfheal,
- &metadata_selfheal,
- &entry_selfheal);
- if (ret)
- goto out;
-
- if (!(data_selfheal || metadata_selfheal || entry_selfheal)) {
- ret = 2;
- goto out;
+afr_selfheal_do(call_frame_t *frame, xlator_t *this, uuid_t gfid)
+{
+ int ret = -1;
+ int entry_ret = 1;
+ int metadata_ret = 1;
+ int data_ret = 1;
+ int or_ret = 0;
+ inode_t *inode = NULL;
+ fd_t *fd = NULL;
+ gf_boolean_t data_selfheal = _gf_false;
+ gf_boolean_t metadata_selfheal = _gf_false;
+ gf_boolean_t entry_selfheal = _gf_false;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ ret = afr_selfheal_unlocked_inspect(frame, this, gfid, &inode,
+ &data_selfheal, &metadata_selfheal,
+ &entry_selfheal, NULL);
+ if (ret)
+ goto out;
+
+ if (!(data_selfheal || metadata_selfheal || entry_selfheal)) {
+ ret = 2;
+ goto out;
+ }
+
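+ /* Data self-heal works on an open fd, so open the file up front for
+ * regular files; failure to get an fd is treated as EIO. */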
+ if (inode->ia_type == IA_IFREG) {
+ ret = afr_selfheal_data_open(this, inode, &fd);
+ if (!fd) {
+ ret = -EIO;
+ goto out;
}
+ }
- if (data_selfheal && dataheal_enabled)
- data_ret = afr_selfheal_data (frame, this, inode);
+ if (data_selfheal && priv->data_self_heal)
+ data_ret = afr_selfheal_data(frame, this, fd);
- if (metadata_selfheal && priv->metadata_self_heal)
- metadata_ret = afr_selfheal_metadata (frame, this, inode);
+ if (metadata_selfheal && priv->metadata_self_heal)
+ metadata_ret = afr_selfheal_metadata(frame, this, inode);
- if (entry_selfheal && priv->entry_self_heal)
- entry_ret = afr_selfheal_entry (frame, this, inode);
+ if (entry_selfheal && priv->entry_self_heal)
+ entry_ret = afr_selfheal_entry(frame, this, inode);
- or_ret = (data_ret | metadata_ret | entry_ret);
+ or_ret = (data_ret | metadata_ret | entry_ret);
- if (data_ret == -EIO || metadata_ret == -EIO || entry_ret == -EIO)
- ret = -EIO;
- else if (data_ret == 1 && metadata_ret == 1 && entry_ret == 1)
- ret = 1;
- else if (or_ret < 0)
- ret = or_ret;
- else
- ret = 0;
+ if (data_ret == -EIO || metadata_ret == -EIO || entry_ret == -EIO)
+ ret = -EIO;
+ else if (data_ret == 1 && metadata_ret == 1 && entry_ret == 1)
+ ret = 1;
+ else if (or_ret < 0)
+ ret = or_ret;
+ else
+ ret = 0;
out:
- if (inode)
- inode_unref (inode);
- return ret;
+ if (inode)
+ inode_unref(inode);
+ if (fd)
+ fd_unref(fd);
+ return ret;
}
/*
* This is the entry point for healing a given GFID. The return values for this
@@ -1454,155 +2593,342 @@ out:
*/
int
-afr_selfheal (xlator_t *this, uuid_t gfid)
+afr_selfheal(xlator_t *this, uuid_t gfid)
{
- int ret = -1;
- call_frame_t *frame = NULL;
+ int ret = -1;
+ call_frame_t *frame = NULL;
+ afr_local_t *local = NULL;
+
+ frame = afr_frame_create(this, NULL);
+ if (!frame)
+ return ret;
- frame = afr_frame_create (this);
- if (!frame)
- return ret;
+ local = frame->local;
+ local->xdata_req = dict_new();
- ret = afr_selfheal_do (frame, this, gfid);
+ ret = afr_selfheal_do(frame, this, gfid);
- if (frame)
- AFR_STACK_DESTROY (frame);
+ if (frame)
+ AFR_STACK_DESTROY(frame);
- return ret;
+ return ret;
}
-afr_local_t*
-__afr_dequeue_heals (afr_private_t *priv)
+afr_local_t *
+__afr_dequeue_heals(afr_private_t *priv)
{
- afr_local_t *local = NULL;
-
- if (list_empty (&priv->heal_waiting))
- goto none;
- if ((priv->background_self_heal_count > 0) &&
- (priv->healers >= priv->background_self_heal_count))
- goto none;
-
- local = list_entry (priv->heal_waiting.next, afr_local_t, healer);
- priv->heal_waiters--;
- GF_ASSERT (priv->heal_waiters >= 0);
- list_del_init(&local->healer);
- list_add(&local->healer, &priv->healing);
- priv->healers++;
- return local;
+ afr_local_t *local = NULL;
+
+ if (list_empty(&priv->heal_waiting))
+ goto none;
+ if ((priv->background_self_heal_count > 0) &&
+ (priv->healers >= priv->background_self_heal_count))
+ goto none;
+
+ local = list_entry(priv->heal_waiting.next, afr_local_t, healer);
+ priv->heal_waiters--;
+ GF_ASSERT(priv->heal_waiters >= 0);
+ list_del_init(&local->healer);
+ list_add(&local->healer, &priv->healing);
+ priv->healers++;
+ return local;
none:
- gf_msg_debug (THIS->name, 0, "Nothing dequeued. "
- "Num healers: %d, Num Waiters: %d",
- priv->healers, priv->heal_waiters);
- return NULL;
+ gf_msg_debug(THIS->name, 0,
+ "Nothing dequeued. "
+ "Num healers: %d, Num Waiters: %d",
+ priv->healers, priv->heal_waiters);
+ return NULL;
}
int
-afr_refresh_selfheal_wrap (void *opaque)
+afr_refresh_selfheal_wrap(void *opaque)
{
- call_frame_t *heal_frame = opaque;
- afr_local_t *local = heal_frame->local;
- int ret = 0;
+ call_frame_t *heal_frame = opaque;
+ afr_local_t *local = heal_frame->local;
+ int ret = 0;
- ret = afr_selfheal (heal_frame->this, local->refreshinode->gfid);
- return ret;
+ ret = afr_selfheal(heal_frame->this, local->refreshinode->gfid);
+ return ret;
}
int
-afr_refresh_heal_done (int ret, call_frame_t *frame, void *opaque)
-{
- call_frame_t *heal_frame = opaque;
- xlator_t *this = heal_frame->this;
- afr_private_t *priv = this->private;
- afr_local_t *local = heal_frame->local;
-
- LOCK (&priv->lock);
- {
- list_del_init(&local->healer);
- priv->healers--;
- GF_ASSERT (priv->healers >= 0);
- local = __afr_dequeue_heals (priv);
- }
- UNLOCK (&priv->lock);
+afr_refresh_heal_done(int ret, call_frame_t *frame, void *opaque)
+{
+ call_frame_t *heal_frame = opaque;
+ xlator_t *this = heal_frame->this;
+ afr_private_t *priv = this->private;
+ afr_local_t *local = heal_frame->local;
- if (heal_frame)
- AFR_STACK_DESTROY (heal_frame);
+ LOCK(&priv->lock);
+ {
+ list_del_init(&local->healer);
+ priv->healers--;
+ GF_ASSERT(priv->healers >= 0);
+ local = __afr_dequeue_heals(priv);
+ }
+ UNLOCK(&priv->lock);
- if (local)
- afr_heal_synctask (this, local);
- return 0;
+ AFR_STACK_DESTROY(heal_frame);
+ if (local)
+ afr_heal_synctask(this, local);
+ return 0;
}
void
-afr_heal_synctask (xlator_t *this, afr_local_t *local)
+afr_heal_synctask(xlator_t *this, afr_local_t *local)
+{
+ int ret = 0;
+ call_frame_t *heal_frame = NULL;
+
+ heal_frame = local->heal_frame;
+ ret = synctask_new(this->ctx->env, afr_refresh_selfheal_wrap,
+ afr_refresh_heal_done, heal_frame, heal_frame);
+ if (ret < 0)
+ /* Heal not launched. Will be queued when the next inode
+ * refresh happens and shd hasn't healed it yet. */
+ afr_refresh_heal_done(ret, heal_frame, heal_frame);
+}
+
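+/* Launch the heal in the background only if the number of in-flight and
+ * waiting heals is still within background_self_heal_count plus
+ * heal_wait_qlen; otherwise return _gf_false so the caller knows the heal
+ * was not queued. */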
+gf_boolean_t
+afr_throttled_selfheal(call_frame_t *frame, xlator_t *this)
{
- int ret = 0;
- call_frame_t *heal_frame = NULL;
+ gf_boolean_t can_heal = _gf_true;
+ afr_private_t *priv = this->private;
+ afr_local_t *local = frame->local;
+
+ LOCK(&priv->lock);
+ {
+ if ((priv->background_self_heal_count > 0) &&
+ (priv->heal_wait_qlen + priv->background_self_heal_count) >
+ (priv->heal_waiters + priv->healers)) {
+ list_add_tail(&local->healer, &priv->heal_waiting);
+ priv->heal_waiters++;
+ local = __afr_dequeue_heals(priv);
+ } else {
+ can_heal = _gf_false;
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (can_heal) {
+ if (local)
+ afr_heal_synctask(this, local);
+ else
+ gf_msg_debug(this->name, 0,
+ "Max number of heals are "
+ "pending, background self-heal rejected.");
+ }
- heal_frame = local->heal_frame;
- ret = synctask_new (this->ctx->env, afr_refresh_selfheal_wrap,
- afr_refresh_heal_done, heal_frame, heal_frame);
- if (ret < 0)
- /* Heal not launched. Will be queued when the next inode
- * refresh happens and shd hasn't healed it yet. */
- afr_refresh_heal_done (ret, heal_frame, heal_frame);
+ return can_heal;
}
-void
-afr_throttled_selfheal (call_frame_t *frame, xlator_t *this)
-{
- gf_boolean_t can_heal = _gf_true;
- afr_private_t *priv = this->private;
- afr_local_t *local = frame->local;
-
- LOCK (&priv->lock);
- {
- if ((priv->background_self_heal_count > 0) &&
- (priv->heal_wait_qlen + priv->background_self_heal_count) >
- (priv->heal_waiters + priv->healers)) {
- list_add_tail(&local->healer, &priv->heal_waiting);
- priv->heal_waiters++;
- local = __afr_dequeue_heals (priv);
- } else {
- can_heal = _gf_false;
- }
+int
+afr_choose_source_by_policy(afr_private_t *priv, unsigned char *sources,
+ afr_transaction_type type)
+{
+ int source = -1;
+ int i = 0;
+
+ /* Give preference to local child to save on bandwidth */
+ for (i = 0; i < priv->child_count; i++) {
+ if (priv->local[i] && sources[i]) {
+ if ((type == AFR_DATA_TRANSACTION) && AFR_IS_ARBITER_BRICK(priv, i))
+ continue;
+
+ source = i;
+ goto out;
}
- UNLOCK (&priv->lock);
-
- if (can_heal) {
- if (local)
- afr_heal_synctask (this, local);
- else
- gf_msg_debug (this->name, 0, "Max number of heals are "
- "pending, background self-heal rejected.");
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i]) {
+ source = i;
+ goto out;
}
+ }
+out:
+ return source;
+}
+
+static int
+afr_anon_inode_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ afr_local_t *local = frame->local;
+ int i = (long)cookie;
+
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+ if (op_ret == 0) {
+ local->op_ret = 0;
+ local->replies[i].poststat = *buf;
+ local->replies[i].preparent = *preparent;
+ local->replies[i].postparent = *postparent;
+ }
+ if (xdata) {
+ local->replies[i].xdata = dict_ref(xdata);
+ }
+
+ syncbarrier_wake(&local->barrier);
+ return 0;
}
int
-afr_choose_source_by_policy (afr_private_t *priv, unsigned char *sources,
- afr_transaction_type type)
+afr_anon_inode_create(xlator_t *this, int child, inode_t **linked_inode)
{
- int source = -1;
- int i = 0;
+ call_frame_t *frame = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = this->private;
+ unsigned char *mkdir_on = alloca0(priv->child_count);
+ unsigned char *lookup_on = alloca0(priv->child_count);
+ loc_t loc = {0};
+ int32_t op_errno = 0;
+ int32_t child_op_errno = 0;
+ struct iatt iatt = {0};
+ dict_t *xdata = NULL;
+ uuid_t anon_inode_gfid = {0};
+ int mkdir_count = 0;
+ int i = 0;
+
+ /* Try to mkdir the anon-inode directory on every up brick and return
+ * success if it ends up existing on 'child'. */
+
+ if (!priv->use_anon_inode) {
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ frame = afr_frame_create(this, &op_errno);
+ if (op_errno) {
+ goto out;
+ }
+ local = frame->local;
+ if (!local->child_up[child]) {
+ /* Other bricks may still need the mkdir, so don't error out yet. */
+ child_op_errno = ENOTCONN;
+ }
+ gf_uuid_parse(priv->anon_gfid_str, anon_inode_gfid);
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->child_up[i])
+ continue;
+
+ if (priv->anon_inode[i]) {
+ mkdir_on[i] = 0;
+ } else {
+ mkdir_on[i] = 1;
+ mkdir_count++;
+ }
+ }
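+ /* mkdir_on[] now marks the up bricks that still need the anon-inode
+ * directory created; mkdir_count is the number of such bricks. */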
- /* Give preference to local child to save on bandwidth */
- for (i = 0; i < priv->child_count; i++) {
- if (priv->local[i] && sources[i]) {
- if ((type == AFR_DATA_TRANSACTION) &&
- AFR_IS_ARBITER_BRICK (priv, i))
- continue;
+ if (mkdir_count == 0) {
+ *linked_inode = inode_find(this->itable, anon_inode_gfid);
+ if (*linked_inode) {
+ op_errno = 0;
+ goto out;
+ }
+ }
+
+ loc.parent = inode_ref(this->itable->root);
+ loc.name = priv->anon_inode_name;
+ loc.inode = inode_new(this->itable);
+ if (!loc.inode) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ xdata = dict_new();
+ if (!xdata) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ op_errno = -dict_set_gfuuid(xdata, "gfid-req", anon_inode_gfid, _gf_true);
+ if (op_errno) {
+ goto out;
+ }
+
+ if (mkdir_count == 0) {
+ memcpy(lookup_on, local->child_up, priv->child_count);
+ goto lookup;
+ }
+
+ AFR_ONLIST(mkdir_on, frame, afr_anon_inode_mkdir_cbk, mkdir, &loc, 0755, 0,
+ xdata);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!mkdir_on[i]) {
+ continue;
+ }
- source = i;
- goto out;
- }
+ if (local->replies[i].op_ret == 0) {
+ priv->anon_inode[i] = 1;
+ iatt = local->replies[i].poststat;
+ } else if (local->replies[i].op_ret < 0 &&
+ local->replies[i].op_errno == EEXIST) {
+ lookup_on[i] = 1;
+ } else if (i == child) {
+ child_op_errno = local->replies[i].op_errno;
+ }
+ }
+
+ if (AFR_COUNT(lookup_on, priv->child_count) == 0) {
+ goto link;
+ }
+
+lookup:
+ AFR_ONLIST(lookup_on, frame, afr_selfheal_discover_cbk, lookup, &loc,
+ xdata);
+ for (i = 0; i < priv->child_count; i++) {
+ if (!lookup_on[i]) {
+ continue;
}
- for (i = 0; i < priv->child_count; i++) {
- if (sources[i]) {
- source = i;
- goto out;
- }
+ if (local->replies[i].op_ret == 0) {
+ if (gf_uuid_compare(anon_inode_gfid,
+ local->replies[i].poststat.ia_gfid) == 0) {
+ priv->anon_inode[i] = 1;
+ iatt = local->replies[i].poststat;
+ } else {
+ if (i == child)
+ child_op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_DATA,
+ "%s has gfid: %s", priv->anon_inode_name,
+ uuid_utoa(local->replies[i].poststat.ia_gfid));
+ }
+ } else if (i == child) {
+ child_op_errno = local->replies[i].op_errno;
+ }
+ }
+link:
+ if (!gf_uuid_is_null(iatt.ia_gfid)) {
+ *linked_inode = inode_link(loc.inode, loc.parent, loc.name, &iatt);
+ if (*linked_inode) {
+ op_errno = 0;
+ inode_lookup(*linked_inode);
+ } else {
+ op_errno = ENOMEM;
}
+ goto out;
+ }
+
out:
- return source;
+ if (xdata)
+ dict_unref(xdata);
+ loc_wipe(&loc);
+ /*child_op_errno takes precedence*/
+ if (child_op_errno == 0) {
+ child_op_errno = op_errno;
+ }
+
+ if (child_op_errno && *linked_inode) {
+ inode_unref(*linked_inode);
+ *linked_inode = NULL;
+ }
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ return -child_op_errno;
}
diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index a2dddafe5c0..37bcc2b3f9e 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -8,610 +8,592 @@
cases as published by the Free Software Foundation.
*/
-
#include "afr.h"
#include "afr-self-heal.h"
-#include "byte-order.h"
+#include <glusterfs/byte-order.h>
#include "protocol-common.h"
#include "afr-messages.h"
-
-enum {
- AFR_SELFHEAL_DATA_FULL = 0,
- AFR_SELFHEAL_DATA_DIFF,
-};
-
+#include <glusterfs/events.h>
#define HAS_HOLES(i) ((i->ia_blocks * 512) < (i->ia_size))
static int
-__checksum_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, uint32_t weak, uint8_t *strong,
- dict_t *xdata)
+__checksum_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, uint32_t weak, uint8_t *strong, dict_t *xdata)
{
- afr_local_t *local = NULL;
- struct afr_reply *replies = NULL;
- int i = (long) cookie;
-
- local = frame->local;
- replies = local->replies;
-
- replies[i].valid = 1;
- replies[i].op_ret = op_ret;
- replies[i].op_errno = op_errno;
- if (xdata)
- replies[i].buf_has_zeroes = dict_get_str_boolean (xdata,
- "buf-has-zeroes", _gf_false);
- if (strong)
- memcpy (local->replies[i].checksum, strong, MD5_DIGEST_LENGTH);
-
- syncbarrier_wake (&local->barrier);
- return 0;
-}
-
-
-static int
-attr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, struct iatt *pre, struct iatt *post,
- dict_t *xdata)
-{
- int i = (long) cookie;
- afr_local_t *local = NULL;
-
- local = frame->local;
-
- local->replies[i].valid = 1;
- local->replies[i].op_ret = op_ret;
- local->replies[i].op_errno = op_errno;
- if (pre)
- local->replies[i].prestat = *pre;
- if (post)
- local->replies[i].poststat = *post;
- if (xdata)
- local->replies[i].xdata = dict_ref (xdata);
-
- syncbarrier_wake (&local->barrier);
+ afr_local_t *local = NULL;
+ struct afr_reply *replies = NULL;
+ int i = (long)cookie;
+
+ local = frame->local;
+ replies = local->replies;
+
+ replies[i].valid = 1;
+ replies[i].op_ret = op_ret;
+ replies[i].op_errno = op_errno;
+ if (xdata) {
+ replies[i].buf_has_zeroes = dict_get_str_boolean(
+ xdata, "buf-has-zeroes", _gf_false);
+ replies[i].fips_mode_rchecksum = dict_get_str_boolean(
+ xdata, "fips-mode-rchecksum", _gf_false);
+ }
+ if (strong) {
+ if (replies[i].fips_mode_rchecksum) {
+ memcpy(local->replies[i].checksum, strong, SHA256_DIGEST_LENGTH);
+ } else {
+ memcpy(local->replies[i].checksum, strong, MD5_DIGEST_LENGTH);
+ }
+ }
- return 0;
+ syncbarrier_wake(&local->barrier);
+ return 0;
}
-
static gf_boolean_t
-__afr_can_skip_data_block_heal (call_frame_t *frame, xlator_t *this, fd_t *fd,
- int source, unsigned char *healed_sinks,
- off_t offset, size_t size,
- struct iatt *poststat)
+__afr_can_skip_data_block_heal(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int source, unsigned char *healed_sinks,
+ off_t offset, size_t size, struct iatt *poststat)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- unsigned char *wind_subvols = NULL;
- gf_boolean_t checksum_match = _gf_true;
- dict_t *xdata = NULL;
- int i = 0;
-
- priv = this->private;
- local = frame->local;
- xdata = dict_new();
- if (xdata)
- i = dict_set_int32 (xdata, "check-zero-filled", 1);
- wind_subvols = alloca0 (priv->child_count);
- for (i = 0; i < priv->child_count; i++) {
- if (i == source || healed_sinks[i])
- wind_subvols[i] = 1;
- }
-
- AFR_ONLIST (wind_subvols, frame, __checksum_cbk, rchecksum, fd,
- offset, size, xdata);
- if (xdata)
- dict_unref (xdata);
-
- if (!local->replies[source].valid || local->replies[source].op_ret != 0)
- return _gf_false;
-
- for (i = 0; i < priv->child_count; i++) {
- if (i == source)
- continue;
- if (local->replies[i].valid) {
- if (memcmp (local->replies[source].checksum,
- local->replies[i].checksum,
- MD5_DIGEST_LENGTH)) {
- checksum_match = _gf_false;
- break;
- }
- }
- }
-
- if (checksum_match) {
- if (HAS_HOLES (poststat))
- return _gf_true;
-
- /* For non-sparse files, we might be better off writing the
- * zeroes to sinks to avoid mismatch of disk-usage in bricks. */
- if (local->replies[source].buf_has_zeroes)
- return _gf_false;
- else
- return _gf_true;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ unsigned char *wind_subvols = NULL;
+ gf_boolean_t checksum_match = _gf_true;
+ struct afr_reply *replies = NULL;
+ dict_t *xdata = NULL;
+ int i = 0;
+
+ priv = this->private;
+ local = frame->local;
+ replies = local->replies;
+
+ xdata = dict_new();
+ if (!xdata)
+ goto out;
+ if (dict_set_int32_sizen(xdata, "check-zero-filled", 1)) {
+ dict_unref(xdata);
+ goto out;
+ }
+
+ wind_subvols = alloca0(priv->child_count);
+ for (i = 0; i < priv->child_count; i++) {
+ if (i == source || healed_sinks[i])
+ wind_subvols[i] = 1;
+ }
+
+ AFR_ONLIST(wind_subvols, frame, __checksum_cbk, rchecksum, fd, offset, size,
+ xdata);
+ if (xdata)
+ dict_unref(xdata);
+
+ if (!replies[source].valid || replies[source].op_ret != 0)
+ return _gf_false;
+
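+ /* Compare each participant's strong checksum against the source's; the
+ * digest length depends on whether the bricks used SHA256 (fips mode) or
+ * MD5 for the rchecksum. */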
+ for (i = 0; i < priv->child_count; i++) {
+ if (i == source)
+ continue;
+ if (replies[i].valid) {
+ if (memcmp(replies[source].checksum, replies[i].checksum,
+ replies[source].fips_mode_rchecksum
+ ? SHA256_DIGEST_LENGTH
+ : MD5_DIGEST_LENGTH)) {
+ checksum_match = _gf_false;
+ break;
+ }
}
+ }
- return _gf_false;
-}
+ if (checksum_match) {
+ if (HAS_HOLES(poststat))
+ return _gf_true;
+ /* For non-sparse files, we might be better off writing the
+ * zeroes to sinks to avoid mismatch of disk-usage in bricks. */
+ if (local->replies[source].buf_has_zeroes)
+ return _gf_false;
+ else
+ return _gf_true;
+ }
+out:
+ return _gf_false;
+}
static gf_boolean_t
-__afr_is_sink_zero_filled (xlator_t *this, fd_t *fd, size_t size,
- off_t offset, int sink)
+__afr_is_sink_zero_filled(xlator_t *this, fd_t *fd, size_t size, off_t offset,
+ int sink)
{
- afr_private_t *priv = NULL;
- struct iobref *iobref = NULL;
- struct iovec *iovec = NULL;
- int count = 0;
- int ret = 0;
- gf_boolean_t zero_filled = _gf_false;
-
- priv = this->private;
- ret = syncop_readv (priv->children[sink], fd, size, offset, 0, &iovec,
- &count, &iobref, NULL, NULL);
- if (ret < 0)
- goto out;
- ret = iov_0filled (iovec, count);
- if (!ret)
- zero_filled = _gf_true;
+ afr_private_t *priv = NULL;
+ struct iobref *iobref = NULL;
+ struct iovec *iovec = NULL;
+ int count = 0;
+ int ret = 0;
+ gf_boolean_t zero_filled = _gf_false;
+
+ priv = this->private;
+ ret = syncop_readv(priv->children[sink], fd, size, offset, 0, &iovec,
+ &count, &iobref, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ ret = iov_0filled(iovec, count);
+ if (!ret)
+ zero_filled = _gf_true;
out:
- if (iovec)
- GF_FREE (iovec);
- if (iobref)
- iobref_unref (iobref);
- return zero_filled;
+ if (iovec)
+ GF_FREE(iovec);
+ if (iobref)
+ iobref_unref(iobref);
+ return zero_filled;
}
static int
-__afr_selfheal_data_read_write (call_frame_t *frame, xlator_t *this, fd_t *fd,
- int source, unsigned char *healed_sinks,
- off_t offset, size_t size,
- struct afr_reply *replies, int type)
+__afr_selfheal_data_read_write(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int source, unsigned char *healed_sinks,
+ off_t offset, size_t size,
+ struct afr_reply *replies, int type)
{
- struct iovec *iovec = NULL;
- int count = 0;
- struct iobref *iobref = NULL;
- int ret = 0;
- int i = 0;
- afr_private_t *priv = NULL;
-
- priv = this->private;
-
- ret = syncop_readv (priv->children[source], fd, size, offset, 0,
- &iovec, &count, &iobref, NULL, NULL);
- if (ret <= 0)
- return ret;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!healed_sinks[i])
- continue;
-
- /*
- * TODO: Use fiemap() and discard() to heal holes
- * in the future.
- *
- * For now,
- *
- * - if the source had any holes at all,
- * AND
- * - if we are writing past the original file size
- * of the sink
- * AND
- * - is NOT the last block of the source file. if
- * the block contains EOF, it has to be written
- * in order to set the file size even if the
- * last block is 0-filled.
- * AND
- * - if the read buffer is filled with only 0's
- *
- * then, skip writing to this source. We don't depend
- * on the write to happen to update the size as we
- * have performed an ftruncate() upfront anyways.
- */
-#define is_last_block(o,b,s) ((s >= o) && (s <= (o + b)))
- if (HAS_HOLES ((&replies[source].poststat)) &&
- offset >= replies[i].poststat.ia_size &&
- !is_last_block (offset, size,
- replies[source].poststat.ia_size) &&
- (iov_0filled (iovec, count) == 0))
- continue;
-
- /* Avoid filling up sparse regions of the sink with 0-filled
- * writes.*/
- if (type == AFR_SELFHEAL_DATA_FULL &&
- HAS_HOLES ((&replies[source].poststat)) &&
- ((offset + size) <= replies[i].poststat.ia_size) &&
- (iov_0filled (iovec, count) == 0) &&
- __afr_is_sink_zero_filled (this, fd, size, offset, i)) {
- continue;
- }
-
- ret = syncop_writev (priv->children[i], fd, iovec, count,
- offset, iobref, 0, NULL, NULL);
- if (ret != iov_length (iovec, count)) {
- /* write() failed on this sink. unset the corresponding
- member in sinks[] (which is healed_sinks[] in the
- caller) so that this server does NOT get considered
- as successfully healed.
- */
- healed_sinks[i] = 0;
- }
- }
- if (iovec)
- GF_FREE (iovec);
- if (iobref)
- iobref_unref (iobref);
-
- return ret;
+ struct iovec *iovec = NULL;
+ int count = 0;
+ struct iobref *iobref = NULL;
+ int ret = 0;
+ int i = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ ret = syncop_readv(priv->children[source], fd, size, offset, 0, &iovec,
+ &count, &iobref, NULL, NULL, NULL);
+ if (ret <= 0)
+ return ret;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!healed_sinks[i])
+ continue;
+
+ /*
+ * TODO: Use fiemap() and discard() to heal holes
+ * in the future.
+ *
+ * For now,
+ *
+ * - if the source had any holes at all,
+ * AND
+ * - if we are writing past the original file size
+ * of the sink
+ * AND
+ * - is NOT the last block of the source file. if
+ * the block contains EOF, it has to be written
+ * in order to set the file size even if the
+ * last block is 0-filled.
+ * AND
+ * - if the read buffer is filled with only 0's
+ *
+ * then, skip writing to this sink. We don't depend
+ * on the write to happen to update the size as we
+ * have performed an ftruncate() upfront anyways.
+ */
+#define is_last_block(o, b, s) ((s >= o) && (s <= (o + b)))
+ if (HAS_HOLES((&replies[source].poststat)) &&
+ offset >= replies[i].poststat.ia_size &&
+ !is_last_block(offset, size, replies[source].poststat.ia_size) &&
+ (iov_0filled(iovec, count) == 0))
+ continue;
+
+ /* Avoid filling up sparse regions of the sink with 0-filled
+ * writes.*/
+ if (type == AFR_SELFHEAL_DATA_FULL &&
+ HAS_HOLES((&replies[source].poststat)) &&
+ ((offset + size) <= replies[i].poststat.ia_size) &&
+ (iov_0filled(iovec, count) == 0) &&
+ __afr_is_sink_zero_filled(this, fd, size, offset, i)) {
+ continue;
+ }
+
+ ret = syncop_writev(priv->children[i], fd, iovec, count, offset, iobref,
+ 0, NULL, NULL, NULL, NULL);
+ if (ret != iov_length(iovec, count)) {
+ /* write() failed on this sink. unset the corresponding
+ member in sinks[] (which is healed_sinks[] in the
+ caller) so that this server does NOT get considered
+ as successfully healed.
+ */
+ healed_sinks[i] = 0;
+ }
+ }
+ if (iovec)
+ GF_FREE(iovec);
+ if (iobref)
+ iobref_unref(iobref);
+
+ return ret;
}
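
The comment inside __afr_selfheal_data_read_write() above lists four conditions under which a zero-filled block is not written to a sink. The following is a minimal standalone sketch of that predicate, not code from this patch; the helper names and the sample values are invented for illustration.
```
#include <stdbool.h>
#include <stdio.h>

/* The block is the source's last block if the source size falls inside
 * [offset, offset + block_len]. */
static bool
is_last_block(long long offset, long long block_len, long long src_size)
{
    return (src_size >= offset) && (src_size <= offset + block_len);
}

/* Skip writing a block to a sink when: the source file is sparse, the write
 * lands past the sink's recorded size, the block is not the source's EOF
 * block, and the read buffer contains only zeroes. */
static bool
skip_zero_block(bool src_has_holes, bool buf_is_zero, long long offset,
                long long block_len, long long sink_size, long long src_size)
{
    return src_has_holes && offset >= sink_size &&
           !is_last_block(offset, block_len, src_size) && buf_is_zero;
}

int
main(void)
{
    /* A zero-filled 128KiB block at offset 1MiB of a sparse 10MiB source,
     * healing a sink whose recorded size was 0: the write is skipped. */
    printf("skip=%d\n",
           (int)skip_zero_block(true, true, 1LL << 20, 128LL << 10, 0,
                                10LL << 20));
    return 0;
}
```
Skipping such writes keeps holes sparse on the sink; the file size is still correct because the sinks are ftruncated to the source size before the block loop starts.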
-static int
-afr_selfheal_data_block (call_frame_t *frame, xlator_t *this, fd_t *fd,
- int source, unsigned char *healed_sinks, off_t offset,
- size_t size, int type, struct afr_reply *replies)
+static gf_boolean_t
+afr_source_sinks_locked(xlator_t *this, unsigned char *locked_on, int source,
+ unsigned char *healed_sinks)
{
- int ret = -1;
- int sink_count = 0;
- afr_private_t *priv = NULL;
- unsigned char *data_lock = NULL;
-
- priv = this->private;
- sink_count = AFR_COUNT (healed_sinks, priv->child_count);
- data_lock = alloca0 (priv->child_count);
-
- ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name,
- offset, size, data_lock);
- {
- if (ret < sink_count) {
- ret = -ENOTCONN;
- goto unlock;
- }
-
- if (type == AFR_SELFHEAL_DATA_DIFF &&
- __afr_can_skip_data_block_heal (frame, this, fd, source,
- healed_sinks, offset, size,
- &replies[source].poststat)) {
- ret = 0;
- goto unlock;
- }
-
- ret = __afr_selfheal_data_read_write (frame, this, fd, source,
- healed_sinks, offset, size,
- replies, type);
- }
-unlock:
- afr_selfheal_uninodelk (frame, this, fd->inode, this->name,
- offset, size, data_lock);
- return ret;
-}
+ afr_private_t *priv = this->private;
+ int i = 0;
+
+ if (!locked_on[source])
+ return _gf_false;
+ for (i = 0; i < priv->child_count; i++) {
+ if (healed_sinks[i] && locked_on[i])
+ return _gf_true;
+ }
+ return _gf_false;
+}
static int
-afr_selfheal_data_fsync (call_frame_t *frame, xlator_t *this, fd_t *fd,
- unsigned char *healed_sinks)
+afr_selfheal_data_block(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int source, unsigned char *healed_sinks, off_t offset,
+ size_t size, int type, struct afr_reply *replies)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
-
- local = frame->local;
- priv = this->private;
-
- AFR_ONLIST (healed_sinks, frame, attr_cbk, fsync, fd, 0, NULL);
-
- for (i = 0; i < priv->child_count; i++)
- if (healed_sinks[i] && local->replies[i].op_ret != 0)
- /* fsync() failed. Do NOT consider this server
- as successfully healed. Mark it so.
- */
- healed_sinks[i] = 0;
- return 0;
-}
+ int ret = -1;
+ afr_private_t *priv = NULL;
+ unsigned char *data_lock = NULL;
+
+ priv = this->private;
+ data_lock = alloca0(priv->child_count);
+
+ ret = afr_selfheal_inodelk(frame, this, fd->inode, this->name, offset, size,
+ data_lock);
+ {
+ if (!afr_source_sinks_locked(this, data_lock, source, healed_sinks)) {
+ ret = -ENOTCONN;
+ goto unlock;
+ }
+ if (type == AFR_SELFHEAL_DATA_DIFF &&
+ __afr_can_skip_data_block_heal(frame, this, fd, source,
+ healed_sinks, offset, size,
+ &replies[source].poststat)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_data_read_write(
+ frame, this, fd, source, healed_sinks, offset, size, replies, type);
+ }
+unlock:
+ afr_selfheal_uninodelk(frame, this, fd->inode, this->name, offset, size,
+ data_lock);
+ return ret;
+}
static int
-afr_selfheal_data_restore_time (call_frame_t *frame, xlator_t *this,
- inode_t *inode, int source,
- unsigned char *healed_sinks,
- struct afr_reply *replies)
+afr_selfheal_data_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ unsigned char *healed_sinks)
{
- loc_t loc = {0, };
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
+ local = frame->local;
+ priv = this->private;
- AFR_ONLIST (healed_sinks, frame, attr_cbk, setattr, &loc,
- &replies[source].poststat,
- (GF_SET_ATTR_ATIME|GF_SET_ATTR_MTIME), NULL);
+ if (!priv->ensure_durability)
+ return 0;
- loc_wipe (&loc);
+ AFR_ONLIST(healed_sinks, frame, afr_sh_generic_fop_cbk, fsync, fd, 0, NULL);
- return 0;
+ for (i = 0; i < priv->child_count; i++)
+ if (healed_sinks[i] && local->replies[i].op_ret != 0)
+ /* fsync() failed. Do NOT consider this server
+ as successfully healed. Mark it so.
+ */
+ healed_sinks[i] = 0;
+ return 0;
}
static int
-afr_data_self_heal_type_get (afr_private_t *priv, unsigned char *healed_sinks,
- int source, struct afr_reply *replies)
+afr_data_self_heal_type_get(afr_private_t *priv, unsigned char *healed_sinks,
+ int source, struct afr_reply *replies)
{
- int type = AFR_SELFHEAL_DATA_FULL;
- int i = 0;
-
- if (priv->data_self_heal_algorithm == NULL) {
- type = AFR_SELFHEAL_DATA_FULL;
- for (i = 0; i < priv->child_count; i++) {
- if (!healed_sinks[i] && i != source)
- continue;
- if (replies[i].poststat.ia_size) {
- type = AFR_SELFHEAL_DATA_DIFF;
- break;
- }
- }
- } else if (strcmp (priv->data_self_heal_algorithm, "full") == 0) {
- type = AFR_SELFHEAL_DATA_FULL;
- } else if (strcmp (priv->data_self_heal_algorithm, "diff") == 0) {
+ int type = AFR_SELFHEAL_DATA_FULL;
+ int i = 0;
+
+ if (priv->data_self_heal_algorithm == AFR_SELFHEAL_DATA_DYNAMIC) {
+ type = AFR_SELFHEAL_DATA_FULL;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!healed_sinks[i] && i != source)
+ continue;
+ if (replies[i].poststat.ia_size) {
type = AFR_SELFHEAL_DATA_DIFF;
+ break;
+ }
}
- return type;
+ } else {
+ type = priv->data_self_heal_algorithm;
+ }
+ return type;
}
static int
-afr_selfheal_data_do (call_frame_t *frame, xlator_t *this, fd_t *fd,
- int source, unsigned char *healed_sinks,
- struct afr_reply *replies)
+afr_selfheal_data_do(call_frame_t *frame, xlator_t *this, fd_t *fd, int source,
+ unsigned char *healed_sinks, struct afr_reply *replies)
{
- afr_private_t *priv = NULL;
- off_t off = 0;
- size_t block = 128 * 1024;
- int type = AFR_SELFHEAL_DATA_FULL;
- int ret = -1;
- call_frame_t *iter_frame = NULL;
- unsigned char arbiter_sink_status = 0;
-
- priv = this->private;
- if (priv->arbiter_count) {
- arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
- healed_sinks[ARBITER_BRICK_INDEX] = 0;
+ afr_private_t *priv = NULL;
+ off_t off = 0;
+ size_t block = 0;
+ int type = AFR_SELFHEAL_DATA_FULL;
+ int ret = -1;
+ call_frame_t *iter_frame = NULL;
+ unsigned char arbiter_sink_status = 0;
+
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
+ "performing data selfheal on %s", uuid_utoa(fd->inode->gfid));
+
+ priv = this->private;
+ if (priv->arbiter_count) {
+ arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
+ healed_sinks[ARBITER_BRICK_INDEX] = 0;
+ }
+
+ block = 128 * 1024 * priv->data_self_heal_window_size;
+
+ type = afr_data_self_heal_type_get(priv, healed_sinks, source, replies);
+
+ iter_frame = afr_copy_frame(frame);
+ if (!iter_frame) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (off = 0; off < replies[source].poststat.ia_size; off += block) {
+ if (AFR_COUNT(healed_sinks, priv->child_count) == 0) {
+ ret = -ENOTCONN;
+ goto out;
}
- type = afr_data_self_heal_type_get (priv, healed_sinks, source,
- replies);
+ ret = afr_selfheal_data_block(iter_frame, this, fd, source,
+ healed_sinks, off, block, type, replies);
+ if (ret < 0)
+ goto out;
- iter_frame = afr_copy_frame (frame);
- if (!iter_frame) {
- ret = -ENOMEM;
- goto out;
+ AFR_STACK_RESET(iter_frame);
+ if (iter_frame->local == NULL) {
+ ret = -ENOTCONN;
+ goto out;
}
+ }
- for (off = 0; off < replies[source].poststat.ia_size; off += block) {
- if (AFR_COUNT (healed_sinks, priv->child_count) == 0) {
- ret = -ENOTCONN;
- goto out;
- }
-
- ret = afr_selfheal_data_block (iter_frame, this, fd, source,
- healed_sinks, off, block, type,
- replies);
- if (ret < 0)
- goto out;
-
- AFR_STACK_RESET (iter_frame);
- if (iter_frame->local == NULL) {
- ret = -ENOTCONN;
- goto out;
- }
- }
-
- ret = afr_selfheal_data_fsync (frame, this, fd, healed_sinks);
+ ret = afr_selfheal_data_fsync(frame, this, fd, healed_sinks);
out:
- if (arbiter_sink_status)
- healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;
+ if (arbiter_sink_status)
+ healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;
- if (iter_frame)
- AFR_STACK_DESTROY (iter_frame);
- return ret;
+ if (iter_frame)
+ AFR_STACK_DESTROY(iter_frame);
+ return ret;
}
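
afr_selfheal_data_do() walks the source file in fixed-size windows: the block size is 128KiB multiplied by the data_self_heal_window_size setting, and afr_selfheal_data_block() is invoked once per window under an inodelk on that range. A standalone sketch of the iteration follows; the window size and file size are example values, not defaults taken from the code.
```
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t window_size = 1;                  /* example heal-window value */
    uint64_t block = 128 * 1024 * window_size; /* bytes healed per iteration */
    uint64_t src_size = 300 * 1024;            /* example source file size */
    int iterations = 0;

    for (uint64_t off = 0; off < src_size; off += block) {
        /* afr_selfheal_data_block() would heal [off, off + block) here */
        iterations++;
    }
    printf("%d block heals for a %llu-byte file\n", iterations,
           (unsigned long long)src_size);
    return 0;
}
```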
-
static int
-__afr_selfheal_truncate_sinks (call_frame_t *frame, xlator_t *this,
- fd_t *fd, unsigned char *healed_sinks,
- uint64_t size)
+__afr_selfheal_truncate_sinks(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ unsigned char *healed_sinks, uint64_t size)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- unsigned char arbiter_sink_status = 0;
- int i = 0;
-
- local = frame->local;
- priv = this->private;
-
- if (priv->arbiter_count) {
- arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
- healed_sinks[ARBITER_BRICK_INDEX] = 0;
- }
-
- AFR_ONLIST (healed_sinks, frame, attr_cbk, ftruncate, fd, size, NULL);
-
- for (i = 0; i < priv->child_count; i++)
- if (healed_sinks[i] && local->replies[i].op_ret == -1)
- /* truncate() failed. Do NOT consider this server
- as successfully healed. Mark it so.
- */
- healed_sinks[i] = 0;
-
- if (arbiter_sink_status)
- healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;
- return 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ /* This will send truncate on the arbiter brick as well if it is marked as
+ * a sink. If changelog is enabled on the volume, it captures the truncate
+ * as a data transaction on the arbiter brick. This helps geo-rep sync the
+ * data properly from master to slave when the arbiter is the ACTIVE brick
+ * during syncing and some of its entries were healed for data as part of
+ * self-heal.
+ */
+ AFR_ONLIST(healed_sinks, frame, afr_sh_generic_fop_cbk, ftruncate, fd, size,
+ NULL);
+
+ for (i = 0; i < priv->child_count; i++)
+ if (healed_sinks[i] && local->replies[i].op_ret == -1)
+ /* truncate() failed. Do NOT consider this server
+ as successfully healed. Mark it so.
+ */
+ healed_sinks[i] = 0;
+
+ return 0;
}
gf_boolean_t
-afr_has_source_witnesses (xlator_t *this, unsigned char *sources,
- uint64_t *witness)
+afr_has_source_witnesses(xlator_t *this, unsigned char *sources,
+ uint64_t *witness)
{
- int i = 0;
- afr_private_t *priv = NULL;
+ int i = 0;
+ afr_private_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (sources[i] && witness[i])
- return _gf_true;
- }
- return _gf_false;
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i] && witness[i])
+ return _gf_true;
+ }
+ return _gf_false;
}
static gf_boolean_t
-afr_does_size_mismatch (xlator_t *this, unsigned char *sources,
- struct afr_reply *replies)
+afr_does_size_mismatch(xlator_t *this, unsigned char *sources,
+ struct afr_reply *replies)
{
- int i = 0;
- afr_private_t *priv = NULL;
- struct iatt *min = NULL;
- struct iatt *max = NULL;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ struct iatt *min = NULL;
+ struct iatt *max = NULL;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
- if (replies[i].op_ret < 0)
- continue;
+ if (replies[i].op_ret < 0)
+ continue;
- if (!sources[i])
- continue;
+ if (!sources[i])
+ continue;
- if (AFR_IS_ARBITER_BRICK (priv, i) &&
- (replies[i].poststat.ia_size == 0))
- continue;
+ if (AFR_IS_ARBITER_BRICK(priv, i) && (replies[i].poststat.ia_size == 0))
+ continue;
- if (!min)
- min = &replies[i].poststat;
+ if (!min)
+ min = &replies[i].poststat;
- if (!max)
- max = &replies[i].poststat;
+ if (!max)
+ max = &replies[i].poststat;
- if (min->ia_size > replies[i].poststat.ia_size)
- min = &replies[i].poststat;
+ if (min->ia_size > replies[i].poststat.ia_size)
+ min = &replies[i].poststat;
- if (max->ia_size < replies[i].poststat.ia_size)
- max = &replies[i].poststat;
- }
+ if (max->ia_size < replies[i].poststat.ia_size)
+ max = &replies[i].poststat;
+ }
- if (min && max) {
- if (min->ia_size != max->ia_size)
- return _gf_true;
- }
+ if (min && max) {
+ if (min->ia_size != max->ia_size)
+ return _gf_true;
+ }
- return _gf_false;
+ return _gf_false;
}
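
afr_does_size_mismatch() only compares the sizes reported by the sources, and an arbiter brick reporting size 0 is ignored since the arbiter does not store file data. The check reduces to a min/max scan, sketched standalone below with made-up sizes (not code from this patch).
```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* sizes reported by three sources; brick 2 is an arbiter reporting 0,
     * so it is skipped */
    uint64_t sizes[] = {4096, 8192, 0};
    bool is_arbiter[] = {false, false, true};
    uint64_t min = UINT64_MAX, max = 0;

    for (int i = 0; i < 3; i++) {
        if (is_arbiter[i] && sizes[i] == 0)
            continue;
        if (sizes[i] < min)
            min = sizes[i];
        if (sizes[i] > max)
            max = sizes[i];
    }
    printf("size mismatch across sources: %s\n", (min != max) ? "yes" : "no");
    return 0;
}
```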
static void
-afr_mark_biggest_witness_as_source (xlator_t *this, unsigned char *sources,
- uint64_t *witness)
+afr_mark_biggest_witness_as_source(xlator_t *this, unsigned char *sources,
+ uint64_t *witness)
{
- int i = 0;
- afr_private_t *priv = NULL;
- uint64_t biggest_witness = 0;
-
- priv = this->private;
- /* Find source with biggest witness count */
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- if (biggest_witness < witness[i])
- biggest_witness = witness[i];
- }
-
- /* Mark files with less witness count as not source */
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
- if (witness[i] < biggest_witness)
- sources[i] = 0;
- }
-
- return;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ uint64_t biggest_witness = 0;
+
+ priv = this->private;
+ /* Find source with biggest witness count */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ if (biggest_witness < witness[i])
+ biggest_witness = witness[i];
+ }
+
+ /* Mark files with less witness count as not source */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ if (witness[i] < biggest_witness)
+ sources[i] = 0;
+ }
+
+ return;
}
/* This is a tie-breaker function. Only one source is assigned here. */
static void
-afr_mark_newest_file_as_source (xlator_t *this, unsigned char *sources,
- struct afr_reply *replies)
+afr_mark_newest_file_as_source(xlator_t *this, unsigned char *sources,
+ struct afr_reply *replies)
{
- int i = 0;
- afr_private_t *priv = NULL;
- int source = -1;
- uint32_t max_ctime = 0;
-
- priv = this->private;
- /* Find source with latest ctime */
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
-
- if (max_ctime <= replies[i].poststat.ia_ctime) {
- source = i;
- max_ctime = replies[i].poststat.ia_ctime;
- }
+ int i = 0;
+ afr_private_t *priv = NULL;
+ int source = -1;
+ uint32_t max_ctime = 0;
+
+ priv = this->private;
+ /* Find source with latest ctime */
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+
+ if (max_ctime <= replies[i].poststat.ia_ctime) {
+ source = i;
+ max_ctime = replies[i].poststat.ia_ctime;
}
+ }
- /* Only mark one of the files as source to break ties */
- memset (sources, 0, sizeof (*sources) * priv->child_count);
- sources[source] = 1;
+ /* Only mark one of the files as source to break ties */
+ memset(sources, 0, sizeof(*sources) * priv->child_count);
+ sources[source] = 1;
}
static int
-__afr_selfheal_data_finalize_source (call_frame_t *frame, xlator_t *this,
- unsigned char *sources,
- unsigned char *sinks,
- unsigned char *healed_sinks,
- unsigned char *locked_on,
- struct afr_reply *replies,
- uint64_t *witness)
+__afr_selfheal_data_finalize_source(
+ call_frame_t *frame, xlator_t *this, inode_t *inode, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks, unsigned char *locked_on,
+ unsigned char *undid_pending, struct afr_reply *replies, uint64_t *witness)
{
- afr_private_t *priv = NULL;
- int source = -1;
- int sources_count = 0;
- priv = this->private;
-
- sources_count = AFR_COUNT (sources, priv->child_count);
-
- if ((AFR_CMP (locked_on, healed_sinks, priv->child_count) == 0)
- || !sources_count) {
- /* split brain */
- source = afr_mark_split_brain_source_sinks (frame, this,
- sources, sinks,
- healed_sinks,
- locked_on, replies,
- AFR_DATA_TRANSACTION);
- if (source < 0)
- return -EIO;
- return source;
- }
-
- /* No split brain at this point. If we were called from
- * afr_heal_splitbrain_file(), abort.*/
- if (afr_dict_contains_heal_op(frame))
- return -EIO;
-
- /* If there are no witnesses/size-mismatches on sources we are done*/
- if (!afr_does_size_mismatch (this, sources, replies) &&
- !afr_has_source_witnesses (this, sources, witness))
- goto out;
-
- afr_mark_largest_file_as_source (this, sources, replies);
- afr_mark_biggest_witness_as_source (this, sources, witness);
- afr_mark_newest_file_as_source (this, sources, replies);
+ afr_private_t *priv = NULL;
+ int source = -1;
+ int sources_count = 0;
+ priv = this->private;
+
+ sources_count = AFR_COUNT(sources, priv->child_count);
+
+ if ((AFR_CMP(locked_on, healed_sinks, priv->child_count) == 0) ||
+ !sources_count) {
+ /* split brain */
+ source = afr_mark_split_brain_source_sinks(
+ frame, this, inode, sources, sinks, healed_sinks, locked_on,
+ replies, AFR_DATA_TRANSACTION);
+ if (source < 0) {
+ gf_event(EVENT_AFR_SPLIT_BRAIN,
+ "client-pid=%d;"
+ "subvol=%s;type=data;"
+ "file=%s",
+ this->ctx->cmd_args.client_pid, this->name,
+ uuid_utoa(inode->gfid));
+ return -EIO;
+ }
+
+ _afr_fav_child_reset_sink_xattrs(
+ frame, this, inode, source, healed_sinks, undid_pending,
+ AFR_DATA_TRANSACTION, locked_on, replies);
+ goto out;
+ }
+
+ /* No split brain at this point. If we were called from
+ * afr_heal_splitbrain_file(), abort.*/
+ if (afr_dict_contains_heal_op(frame))
+ return -EIO;
+
+ /* If there are no witnesses/size-mismatches on sources we are done*/
+ if (!afr_does_size_mismatch(this, sources, replies) &&
+ !afr_has_source_witnesses(this, sources, witness))
+ goto out;
+
+ afr_mark_largest_file_as_source(this, sources, replies);
+ afr_mark_biggest_witness_as_source(this, sources, witness);
+ afr_mark_newest_file_as_source(this, sources, replies);
+ if (priv->arbiter_count)
+ /* Choose non-arbiter brick as source for empty files. */
+ afr_mark_source_sinks_if_file_empty(this, sources, sinks, healed_sinks,
+ locked_on, replies,
+ AFR_DATA_TRANSACTION);
out:
- afr_mark_active_sinks (this, sources, locked_on, healed_sinks);
- source = afr_choose_source_by_policy (priv, sources,
- AFR_DATA_TRANSACTION);
+ afr_mark_active_sinks(this, sources, locked_on, healed_sinks);
+ source = afr_choose_source_by_policy(priv, sources, AFR_DATA_TRANSACTION);
- return source;
+ return source;
}
/*
@@ -624,245 +606,286 @@ out:
* for self-healing, or -1 if no healing is necessary/split brain.
*/
int
-__afr_selfheal_data_prepare (call_frame_t *frame, xlator_t *this,
- inode_t *inode, unsigned char *locked_on,
- unsigned char *sources, unsigned char *sinks,
- unsigned char *healed_sinks,
- struct afr_reply *replies, gf_boolean_t *pflag)
+__afr_selfheal_data_prepare(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ unsigned char *locked_on, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ struct afr_reply *replies, unsigned char *pflag)
{
- int ret = -1;
- int source = -1;
- afr_private_t *priv = NULL;
- uint64_t *witness = NULL;
-
- priv = this->private;
-
- ret = afr_selfheal_unlocked_discover (frame, inode, inode->gfid,
- replies);
-
- if (ret)
- return ret;
-
- witness = alloca0(priv->child_count * sizeof (*witness));
- ret = afr_selfheal_find_direction (frame, this, replies,
- AFR_DATA_TRANSACTION,
- locked_on, sources, sinks, witness,
- pflag);
- if (ret)
- return ret;
-
- /* Initialize the healed_sinks[] array optimistically to
- the intersection of to-be-healed (i.e sinks[]) and
- the list of servers which are up (i.e locked_on[]).
- As we encounter failures in the healing process, we
- will unmark the respective servers in the healed_sinks[]
- array.
- */
- AFR_INTERSECT (healed_sinks, sinks, locked_on, priv->child_count);
-
- source = __afr_selfheal_data_finalize_source (frame, this, sources,
- sinks, healed_sinks,
- locked_on, replies,
- witness);
- if (source < 0)
- return -EIO;
-
- return source;
+ int ret = -1;
+ int source = -1;
+ afr_private_t *priv = NULL;
+ uint64_t *witness = NULL;
+
+ priv = this->private;
+
+ ret = afr_selfheal_unlocked_discover(frame, inode, inode->gfid, replies);
+
+ if (ret)
+ return ret;
+
+ witness = alloca0(priv->child_count * sizeof(*witness));
+ ret = afr_selfheal_find_direction(frame, this, replies,
+ AFR_DATA_TRANSACTION, locked_on, sources,
+ sinks, witness, pflag);
+ if (ret)
+ return ret;
+
+ /* Initialize the healed_sinks[] array optimistically to
+ the intersection of to-be-healed (i.e. sinks[]) and
+ the list of servers which are up (i.e. locked_on[]).
+ As we encounter failures in the healing process, we
+ will unmark the respective servers in the healed_sinks[]
+ array.
+ */
+ AFR_INTERSECT(healed_sinks, sinks, locked_on, priv->child_count);
+
+ source = __afr_selfheal_data_finalize_source(
+ frame, this, inode, sources, sinks, healed_sinks, locked_on,
+ undid_pending, replies, witness);
+ if (source < 0)
+ return -EIO;
+
+ return source;
}
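
The healed_sinks[] initialisation described in the comment inside __afr_selfheal_data_prepare() above is a plain per-brick AND of sinks[] and locked_on[]. A standalone illustration, with invented brick states:
```
#include <stdio.h>

int
main(void)
{
    enum { CHILD_COUNT = 3 };
    /* bricks 1 and 2 need heal, but the lock succeeded only on 0 and 1 */
    unsigned char sinks[CHILD_COUNT] = {0, 1, 1};
    unsigned char locked_on[CHILD_COUNT] = {1, 1, 0};
    unsigned char healed_sinks[CHILD_COUNT];

    for (int i = 0; i < CHILD_COUNT; i++)
        healed_sinks[i] = sinks[i] && locked_on[i];

    for (int i = 0; i < CHILD_COUNT; i++)
        printf("healed_sinks[%d] = %d\n", i, healed_sinks[i]);
    return 0;
}
```
Only brick 1 starts out as a healable sink here; bricks that fail later steps (truncate, write, fsync) are unmarked from the array in the same way.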
-
static int
-__afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd,
- unsigned char *locked_on)
+__afr_selfheal_data(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ unsigned char *locked_on)
{
- afr_private_t *priv = NULL;
- int ret = -1;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *data_lock = NULL;
- unsigned char *healed_sinks = NULL;
- struct afr_reply *locked_replies = NULL;
- int source = -1;
- gf_boolean_t did_sh = _gf_true;
- gf_boolean_t is_arbiter_the_only_sink = _gf_false;
-
- priv = this->private;
-
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
- data_lock = alloca0 (priv->child_count);
-
- locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count);
-
- ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name, 0, 0,
- data_lock);
- {
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- gf_msg_debug (this->name, 0, "%s: Skipping "
- "self-heal as only %d number "
- "of subvolumes "
- "could be locked",
- uuid_utoa (fd->inode->gfid),
- ret);
- ret = -ENOTCONN;
- goto unlock;
- }
-
- ret = __afr_selfheal_data_prepare (frame, this, fd->inode,
- data_lock, sources, sinks,
- healed_sinks,
- locked_replies,
- NULL);
- if (ret < 0)
- goto unlock;
-
- if (AFR_COUNT(healed_sinks, priv->child_count) == 0) {
- did_sh = _gf_false;
- goto unlock;
- }
-
- source = ret;
-
- if (AFR_IS_ARBITER_BRICK(priv, source)) {
- did_sh = _gf_false;
- goto unlock;
- }
-
- if (priv->arbiter_count &&
- AFR_COUNT (healed_sinks, priv->child_count) == 1 &&
- healed_sinks[ARBITER_BRICK_INDEX]) {
- is_arbiter_the_only_sink = _gf_true;
- goto restore_time;
- }
-
- ret = __afr_selfheal_truncate_sinks (frame, this, fd, healed_sinks,
- locked_replies[source].poststat.ia_size);
- if (ret < 0)
- goto unlock;
-
- ret = 0;
-
- }
-unlock:
- afr_selfheal_uninodelk (frame, this, fd->inode, this->name, 0, 0,
- data_lock);
+ afr_private_t *priv = NULL;
+ int ret = -1;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ unsigned char *data_lock = NULL;
+ unsigned char *healed_sinks = NULL;
+ unsigned char *undid_pending = NULL;
+ struct afr_reply *locked_replies = NULL;
+ int source = -1;
+ gf_boolean_t did_sh = _gf_true;
+ gf_boolean_t is_arbiter_the_only_sink = _gf_false;
+ gf_boolean_t empty_file = _gf_false;
+
+ priv = this->private;
+
+ sources = alloca0(priv->child_count);
+ sinks = alloca0(priv->child_count);
+ healed_sinks = alloca0(priv->child_count);
+ data_lock = alloca0(priv->child_count);
+ undid_pending = alloca0(priv->child_count);
+
+ locked_replies = alloca0(sizeof(*locked_replies) * priv->child_count);
+
+ ret = afr_selfheal_inodelk(frame, this, fd->inode, this->name, 0, 0,
+ data_lock);
+ {
+ if (ret < priv->child_count) {
+ gf_msg_debug(this->name, 0,
+ "%s: Skipping "
+ "self-heal as only %d number "
+ "of subvolumes "
+ "could be locked",
+ uuid_utoa(fd->inode->gfid), ret);
+ ret = -ENOTCONN;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_data_prepare(frame, this, fd->inode, data_lock,
+ sources, sinks, healed_sinks,
+ undid_pending, locked_replies, NULL);
if (ret < 0)
- goto out;
+ goto unlock;
- if (!did_sh)
- goto out;
+ if (AFR_COUNT(healed_sinks, priv->child_count) == 0) {
+ did_sh = _gf_false;
+ goto unlock;
+ }
+
+ source = ret;
+
+ if (AFR_IS_ARBITER_BRICK(priv, source)) {
+ empty_file = afr_is_file_empty_on_all_children(priv,
+ locked_replies);
+ if (empty_file)
+ goto restore_time;
+
+ did_sh = _gf_false;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_truncate_sinks(
+ frame, this, fd, healed_sinks,
+ locked_replies[source].poststat.ia_size);
+ if (ret < 0)
+ goto unlock;
+
+ if (priv->arbiter_count &&
+ AFR_COUNT(healed_sinks, priv->child_count) == 1 &&
+ healed_sinks[ARBITER_BRICK_INDEX]) {
+ is_arbiter_the_only_sink = _gf_true;
+ goto restore_time;
+ }
+ ret = 0;
+ }
+unlock:
+ afr_selfheal_uninodelk(frame, this, fd->inode, this->name, 0, 0, data_lock);
+ if (ret < 0)
+ goto out;
+
+ if (!did_sh)
+ goto out;
- ret = afr_selfheal_data_do (frame, this, fd, source, healed_sinks,
- locked_replies);
- if (ret)
- goto out;
+ ret = afr_selfheal_data_do(frame, this, fd, source, healed_sinks,
+ locked_replies);
+ if (ret)
+ goto out;
restore_time:
- afr_selfheal_data_restore_time (frame, this, fd->inode, source,
- healed_sinks, locked_replies);
-
- if (!is_arbiter_the_only_sink) {
- ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name,
- 0, 0, data_lock);
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- ret = -ENOTCONN;
- did_sh = _gf_false;
- goto skip_undo_pending;
- }
+ afr_selfheal_restore_time(frame, this, fd->inode, source, healed_sinks,
+ locked_replies);
+
+ if (!is_arbiter_the_only_sink && !empty_file) {
+ ret = afr_selfheal_inodelk(frame, this, fd->inode, this->name, 0, 0,
+ data_lock);
+ if (ret < priv->child_count) {
+ ret = -ENOTCONN;
+ did_sh = _gf_false;
+ goto skip_undo_pending;
}
- ret = afr_selfheal_undo_pending (frame, this, fd->inode,
- sources, sinks, healed_sinks,
- AFR_DATA_TRANSACTION,
- locked_replies, data_lock);
+ }
+ ret = afr_selfheal_undo_pending(
+ frame, this, fd->inode, sources, sinks, healed_sinks, undid_pending,
+ AFR_DATA_TRANSACTION, locked_replies, data_lock);
skip_undo_pending:
- afr_selfheal_uninodelk (frame, this, fd->inode, this->name, 0, 0,
- data_lock);
+ afr_selfheal_uninodelk(frame, this, fd->inode, this->name, 0, 0, data_lock);
out:
- if (did_sh)
- afr_log_selfheal (fd->inode->gfid, this, ret, "data", source,
- healed_sinks);
- else
- ret = 1;
+ if (did_sh)
+ afr_log_selfheal(fd->inode->gfid, this, ret, "data", source, sources,
+ healed_sinks);
+ else
+ ret = 1;
- if (locked_replies)
- afr_replies_wipe (locked_replies, priv->child_count);
+ if (locked_replies)
+ afr_replies_wipe(locked_replies, priv->child_count);
- return ret;
+ return ret;
}
+int
+afr_selfheal_data_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd,
+ dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int i = (long)cookie;
+
+ local = frame->local;
+
+ local->replies[i].valid = 1;
+ local->replies[i].op_ret = op_ret;
+ local->replies[i].op_errno = op_errno;
+
+ syncbarrier_wake(&local->barrier);
+
+ return 0;
+}
int
-afr_selfheal_data_open (xlator_t *this, inode_t *inode, fd_t **fd)
+afr_selfheal_data_open(xlator_t *this, inode_t *inode, fd_t **fd)
{
- int ret = 0;
- fd_t *fd_tmp = NULL;
- loc_t loc = {0,};
-
- fd_tmp = fd_create (inode, 0);
- if (!fd_tmp)
- return -ENOMEM;
-
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
-
- ret = syncop_open (this, &loc, O_RDWR|O_LARGEFILE, fd_tmp, NULL, NULL);
- if (ret < 0) {
- fd_unref (fd_tmp);
- loc_wipe (&loc);
- goto out;
- } else {
- fd_bind (fd_tmp);
- }
-
- *fd = fd_tmp;
+ int ret = 0;
+ fd_t *fd_tmp = NULL;
+ loc_t loc = {
+ 0,
+ };
+ call_frame_t *frame = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+
+ priv = this->private;
+
+ fd_tmp = fd_create(inode, 0);
+ if (!fd_tmp)
+ return -ENOMEM;
+
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
+
+ frame = afr_frame_create(this, &ret);
+ if (!frame) {
+ ret = -ret;
+ fd_unref(fd_tmp);
+ goto out;
+ }
+ local = frame->local;
+
+ AFR_ONLIST(local->child_up, frame, afr_selfheal_data_open_cbk, open, &loc,
+ O_RDWR | O_LARGEFILE, fd_tmp, NULL);
+
+ ret = -ENOTCONN;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+
+ if (local->replies[i].op_ret < 0) {
+ ret = -local->replies[i].op_errno;
+ continue;
+ }
+
+ ret = 0;
+ break;
+ }
+
+ if (ret < 0) {
+ fd_unref(fd_tmp);
+ goto out;
+ } else {
+ fd_bind(fd_tmp);
+ }
+
+ *fd = fd_tmp;
out:
- return ret;
+ loc_wipe(&loc);
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ return ret;
}
int
-afr_selfheal_data (call_frame_t *frame, xlator_t *this, inode_t *inode)
+afr_selfheal_data(call_frame_t *frame, xlator_t *this, fd_t *fd)
{
- afr_private_t *priv = NULL;
- unsigned char *locked_on = NULL;
- int ret = 0;
- fd_t *fd = NULL;
-
- priv = this->private;
-
- ret = afr_selfheal_data_open (this, inode, &fd);
- if (!fd) {
- gf_msg_debug (this->name, -ret, "%s: Failed to open",
- uuid_utoa (inode->gfid));
- return -EIO;
+ afr_private_t *priv = NULL;
+ unsigned char *locked_on = NULL;
+ int ret = 0;
+ inode_t *inode = fd->inode;
+
+ priv = this->private;
+
+ locked_on = alloca0(priv->child_count);
+
+ ret = afr_selfheal_tie_breaker_inodelk(frame, this, inode, priv->sh_domain,
+ 0, 0, locked_on);
+ {
+ if (ret < priv->child_count) {
+ gf_msg_debug(this->name, 0,
+ "%s: Skipping "
+ "self-heal as only %d number of "
+ "subvolumes could be locked",
+ uuid_utoa(fd->inode->gfid), ret);
+ /* Either fewer than two subvols are available, or another
+ self-heal (from another server) is in progress. Skip
+ for now; in either case there isn't anything to do.
+ */
+ ret = -ENOTCONN;
+ goto unlock;
}
- locked_on = alloca0 (priv->child_count);
-
- ret = afr_selfheal_tryinodelk (frame, this, inode, priv->sh_domain, 0, 0,
- locked_on);
- {
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- gf_msg_debug (this->name, 0, "%s: Skipping "
- "self-heal as only %d number of "
- "subvolumes could be locked",
- uuid_utoa (fd->inode->gfid),
- ret);
- /* Either less than two subvols available, or another
- selfheal (from another server) is in progress. Skip
- for now in any case there isn't anything to do.
- */
- ret = -ENOTCONN;
- goto unlock;
- }
-
- ret = __afr_selfheal_data (frame, this, fd, locked_on);
- }
+ ret = __afr_selfheal_data(frame, this, fd, locked_on);
+ }
unlock:
- afr_selfheal_uninodelk (frame, this, inode, priv->sh_domain, 0, 0, locked_on);
-
- if (fd)
- fd_unref (fd);
+ afr_selfheal_uninodelk(frame, this, inode, priv->sh_domain, 0, 0,
+ locked_on);
- return ret;
+ return ret;
}
diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
index e4d616ea20a..64893f441e3 100644
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
@@ -8,793 +8,1269 @@
cases as published by the Free Software Foundation.
*/
-
#include "afr.h"
#include "afr-self-heal.h"
-#include "byte-order.h"
+#include <glusterfs/byte-order.h>
#include "afr-transaction.h"
#include "afr-messages.h"
+#include <glusterfs/syncop-utils.h>
+#include <glusterfs/events.h>
-/* Max file name length is 255 this filename is of length 256. No file with
- * this name can ever come, entry-lock with this name is going to prevent
- * self-heals from older versions while the granular entry-self-heal is going
- * on in newer version.*/
-#define LONG_FILENAME "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-
-static int
-afr_selfheal_entry_delete (xlator_t *this, inode_t *dir, const char *name,
- inode_t *inode, int child, struct afr_reply *replies)
+int
+afr_selfheal_entry_anon_inode(xlator_t *this, inode_t *dir, const char *name,
+ inode_t *inode, int child,
+ struct afr_reply *replies,
+ gf_boolean_t *anon_inode)
{
- afr_private_t *priv = NULL;
- xlator_t *subvol = NULL;
- int ret = 0;
- loc_t loc = {0, };
- char g[64];
-
- priv = this->private;
-
- subvol = priv->children[child];
-
- loc.parent = inode_ref (dir);
- gf_uuid_copy (loc.pargfid, dir->gfid);
- loc.name = name;
- loc.inode = inode_ref (inode);
-
- if (replies[child].valid && replies[child].op_ret == 0) {
- switch (replies[child].poststat.ia_type) {
- case IA_IFDIR:
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_EXPUNGING_FILE_OR_DIR,
- "expunging dir %s/%s (%s) on %s",
- uuid_utoa (dir->gfid), name,
- uuid_utoa_r (replies[child].poststat.ia_gfid, g),
- subvol->name);
- ret = syncop_rmdir (subvol, &loc, 1, NULL, NULL);
- break;
- default:
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_EXPUNGING_FILE_OR_DIR,
- "expunging file %s/%s (%s) on %s",
- uuid_utoa (dir->gfid), name,
- uuid_utoa_r (replies[child].poststat.ia_gfid, g),
- subvol->name);
- ret = syncop_unlink (subvol, &loc, NULL, NULL);
- break;
- }
- }
-
- loc_wipe (&loc);
-
- return ret;
-}
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ xlator_t *subvol = NULL;
+ int ret = 0;
+ int i = 0;
+ char g[64] = {0};
+ unsigned char *lookup_success = NULL;
+ call_frame_t *frame = NULL;
+ loc_t loc2 = {
+ 0,
+ };
+ loc_t loc = {
+ 0,
+ };
+
+ priv = this->private;
+ subvol = priv->children[child];
+ lookup_success = alloca0(priv->child_count);
+ uuid_utoa_r(replies[child].poststat.ia_gfid, g);
+ loc.inode = inode_new(inode->table);
+ if (!loc.inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (replies[child].poststat.ia_type == IA_IFDIR) {
+ /* This directory may have a sub-directory hierarchy that may need to
+ * be preserved for subsequent heals, so unconditionally move the
+ * directory to the anonymous-inode directory. */
+ *anon_inode = _gf_true;
+ goto anon_inode;
+ }
+
+ frame = afr_frame_create(this, &ret);
+ if (!frame) {
+ ret = -ret;
+ goto out;
+ }
+ local = frame->local;
+ gf_uuid_copy(loc.gfid, replies[child].poststat.ia_gfid);
+ AFR_ONLIST(local->child_up, frame, afr_selfheal_discover_cbk, lookup, &loc,
+ NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].op_ret == 0) {
+ lookup_success[i] = 1;
+ } else if (local->replies[i].op_errno != ENOENT &&
+ local->replies[i].op_errno != ESTALE) {
+ ret = -local->replies[i].op_errno;
+ }
+ }
+ if (priv->quorum_count) {
+ if (afr_has_quorum(lookup_success, this, NULL)) {
+ *anon_inode = _gf_true;
+ }
+ } else if (AFR_COUNT(lookup_success, priv->child_count) > 1) {
+ *anon_inode = _gf_true;
+ } else if (ret) {
+ goto out;
+ }
+
+anon_inode:
+ if (!*anon_inode) {
+ ret = 0;
+ goto out;
+ }
+
+ loc.parent = inode_ref(dir);
+ gf_uuid_copy(loc.pargfid, dir->gfid);
+ loc.name = name;
+
+ ret = afr_anon_inode_create(this, child, &loc2.parent);
+ if (ret < 0)
+ goto out;
+
+ loc2.name = g;
+ ret = syncop_rename(subvol, &loc, &loc2, NULL, NULL);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_EXPUNGING_FILE_OR_DIR,
+ "Rename to %s dir %s/%s (%s) on %s failed",
+ priv->anon_inode_name, uuid_utoa(dir->gfid), name, g,
+ subvol->name);
+ } else {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_EXPUNGING_FILE_OR_DIR,
+ "Rename to %s dir %s/%s (%s) on %s successful",
+ priv->anon_inode_name, uuid_utoa(dir->gfid), name, g,
+ subvol->name);
+ }
+
+out:
+ loc_wipe(&loc);
+ loc_wipe(&loc2);
+ if (frame) {
+ AFR_STACK_DESTROY(frame);
+ }
+
+ return ret;
+}
int
-afr_selfheal_recreate_entry (xlator_t *this, int dst, int source, inode_t *dir,
- const char *name, inode_t *inode,
- struct afr_reply *replies,
- unsigned char *newentry)
+afr_selfheal_entry_delete(xlator_t *this, inode_t *dir, const char *name,
+ inode_t *inode, int child, struct afr_reply *replies)
{
- int ret = 0;
- loc_t loc = {0,};
- loc_t srcloc = {0,};
- afr_private_t *priv = NULL;
- dict_t *xdata = NULL;
- struct iatt *iatt = NULL;
- char *linkname = NULL;
- mode_t mode = 0;
- struct iatt newent = {0,};
- priv = this->private;
-
- xdata = dict_new();
- if (!xdata)
- return -ENOMEM;
-
- loc.parent = inode_ref (dir);
- gf_uuid_copy (loc.pargfid, dir->gfid);
- loc.name = name;
- loc.inode = inode_ref (inode);
-
- ret = afr_selfheal_entry_delete (this, dir, name, inode, dst, replies);
- if (ret)
- goto out;
-
- ret = dict_set_static_bin (xdata, "gfid-req",
- replies[source].poststat.ia_gfid, 16);
- if (ret)
- goto out;
-
- iatt = &replies[source].poststat;
-
- srcloc.inode = inode_ref (inode);
- gf_uuid_copy (srcloc.gfid, iatt->ia_gfid);
-
- mode = st_mode_from_ia (iatt->ia_prot, iatt->ia_type);
-
- switch (iatt->ia_type) {
- case IA_IFDIR:
- ret = syncop_mkdir (priv->children[dst], &loc, mode, 0,
- xdata, NULL);
- if (ret == 0)
- newentry[dst] = 1;
- break;
- case IA_IFLNK:
- ret = syncop_lookup (priv->children[dst], &srcloc, 0, 0, 0, 0);
- if (ret == 0) {
- ret = syncop_link (priv->children[dst], &srcloc, &loc,
- &newent, NULL, NULL);
- } else {
- ret = syncop_readlink (priv->children[source], &srcloc,
- &linkname, 4096, NULL, NULL);
- if (ret <= 0)
- goto out;
- ret = syncop_symlink (priv->children[dst], &loc,
- linkname, NULL, xdata, NULL);
- if (ret == 0)
- newentry[dst] = 1;
- }
- break;
- default:
- ret = dict_set_int32 (xdata, GLUSTERFS_INTERNAL_FOP_KEY, 1);
- if (ret)
- goto out;
- ret = syncop_mknod (priv->children[dst], &loc, mode,
- iatt->ia_rdev, &newent, xdata, NULL);
- if (ret == 0 && newent.ia_nlink == 1) {
- /* New entry created. Mark @dst pending on all sources */
- newentry[dst] = 1;
- }
- break;
- }
+ char g[64] = {0};
+ afr_private_t *priv = NULL;
+ xlator_t *subvol = NULL;
+ int ret = 0;
+ loc_t loc = {
+ 0,
+ };
+ gf_boolean_t anon_inode = _gf_false;
+
+ priv = this->private;
+ subvol = priv->children[child];
+
+ if ((!replies[child].valid) || (replies[child].op_ret < 0)) {
+ /*Nothing to do*/
+ ret = 0;
+ goto out;
+ }
+
+ if (priv->use_anon_inode) {
+ ret = afr_selfheal_entry_anon_inode(this, dir, name, inode, child,
+ replies, &anon_inode);
+ if (ret < 0 || anon_inode)
+ goto out;
+ }
+
+ loc.parent = inode_ref(dir);
+ loc.inode = inode_new(inode->table);
+ if (!loc.inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ loc.name = name;
+ switch (replies[child].poststat.ia_type) {
+ case IA_IFDIR:
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_EXPUNGING_FILE_OR_DIR,
+ "expunging dir %s/%s (%s) on %s", uuid_utoa(dir->gfid), name,
+ uuid_utoa_r(replies[child].poststat.ia_gfid, g),
+ subvol->name);
+ ret = syncop_rmdir(subvol, &loc, 1, NULL, NULL);
+ break;
+ default:
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_EXPUNGING_FILE_OR_DIR,
+ "expunging file %s/%s (%s) on %s", uuid_utoa(dir->gfid),
+ name, uuid_utoa_r(replies[child].poststat.ia_gfid, g),
+ subvol->name);
+ ret = syncop_unlink(subvol, &loc, NULL, NULL);
+ break;
+ }
out:
- if (xdata)
- dict_unref (xdata);
- GF_FREE (linkname);
- loc_wipe (&loc);
- loc_wipe (&srcloc);
- return ret;
+ loc_wipe(&loc);
+ return ret;
}
+int
+afr_selfheal_recreate_entry(call_frame_t *frame, int dst, int source,
+ unsigned char *sources, inode_t *dir,
+ const char *name, inode_t *inode,
+ struct afr_reply *replies)
+{
+ int ret = 0;
+ loc_t loc = {
+ 0,
+ };
+ loc_t srcloc = {
+ 0,
+ };
+ loc_t anonloc = {
+ 0,
+ };
+ xlator_t *this = frame->this;
+ afr_private_t *priv = NULL;
+ dict_t *xdata = NULL;
+ struct iatt *iatt = NULL;
+ char *linkname = NULL;
+ mode_t mode = 0;
+ struct iatt newent = {
+ 0,
+ };
+ unsigned char *newentry = NULL;
+ char iatt_uuid_str[64] = {0};
+ char dir_uuid_str[64] = {0};
+
+ priv = this->private;
+ iatt = &replies[source].poststat;
+ uuid_utoa_r(iatt->ia_gfid, iatt_uuid_str);
+ if (iatt->ia_type == IA_INVAL || gf_uuid_is_null(iatt->ia_gfid)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SELF_HEAL_FAILED,
+ "Invalid ia_type (%d) or gfid(%s). source brick=%d, "
+ "pargfid=%s, name=%s",
+ iatt->ia_type, iatt_uuid_str, source,
+ uuid_utoa_r(dir->gfid, dir_uuid_str), name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ xdata = dict_new();
+ if (!xdata)
+ return -ENOMEM;
+ newentry = alloca0(priv->child_count);
+ loc.parent = inode_ref(dir);
+ gf_uuid_copy(loc.pargfid, dir->gfid);
+ loc.name = name;
+ loc.inode = inode_ref(inode);
+
+ ret = afr_selfheal_entry_delete(this, dir, name, inode, dst, replies);
+ if (ret)
+ goto out;
+
+ ret = dict_set_gfuuid(xdata, "gfid-req", replies[source].poststat.ia_gfid,
+ true);
+ if (ret)
+ goto out;
+
+ srcloc.inode = inode_ref(inode);
+ gf_uuid_copy(srcloc.gfid, iatt->ia_gfid);
+ ret = syncop_lookup(priv->children[dst], &srcloc, 0, 0, 0, 0);
+ if (ret == -ENOENT || ret == -ESTALE) {
+ newentry[dst] = 1;
+ ret = afr_selfheal_newentry_mark(frame, this, inode, source, replies,
+ sources, newentry);
+ if (ret)
+ goto out;
+ } else if (ret == 0 && iatt->ia_type == IA_IFDIR && priv->use_anon_inode) {
+ // Try rename from hidden directory
+ ret = afr_anon_inode_create(this, dst, &anonloc.parent);
+ if (ret < 0)
+ goto out;
+ anonloc.inode = inode_ref(inode);
+ anonloc.name = iatt_uuid_str;
+ ret = syncop_rename(priv->children[dst], &anonloc, &loc, NULL, NULL);
+ if (ret == -ENOENT || ret == -ESTALE)
+ ret = -1; /*This sets 'mismatch' to true*/
+ goto out;
+ }
+
+ mode = st_mode_from_ia(iatt->ia_prot, iatt->ia_type);
+
+ switch (iatt->ia_type) {
+ case IA_IFDIR:
+ ret = syncop_mkdir(priv->children[dst], &loc, mode, 0, xdata, NULL);
+ break;
+ case IA_IFLNK:
+ if (!newentry[dst]) {
+ ret = syncop_link(priv->children[dst], &srcloc, &loc, &newent,
+ NULL, NULL);
+ } else {
+ ret = syncop_readlink(priv->children[source], &srcloc,
+ &linkname, 4096, NULL, NULL);
+ if (ret <= 0)
+ goto out;
+ ret = syncop_symlink(priv->children[dst], &loc, linkname, NULL,
+ xdata, NULL);
+ }
+ break;
+ default:
+ ret = dict_set_int32_sizen(xdata, GLUSTERFS_INTERNAL_FOP_KEY, 1);
+ if (ret)
+ goto out;
+ ret = syncop_mknod(
+ priv->children[dst], &loc, mode,
+ makedev(ia_major(iatt->ia_rdev), ia_minor(iatt->ia_rdev)),
+ &newent, xdata, NULL);
+ break;
+ }
+
+out:
+ if (xdata)
+ dict_unref(xdata);
+ GF_FREE(linkname);
+ loc_wipe(&loc);
+ loc_wipe(&srcloc);
+ loc_wipe(&anonloc);
+ return ret;
+}
static int
-__afr_selfheal_heal_dirent (call_frame_t *frame, xlator_t *this, fd_t *fd,
- char *name, inode_t *inode, int source,
- unsigned char *sources, unsigned char *healed_sinks,
- unsigned char *locked_on, struct afr_reply *replies)
+__afr_selfheal_heal_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ char *name, inode_t *inode, int source,
+ unsigned char *sources, unsigned char *healed_sinks,
+ unsigned char *locked_on, struct afr_reply *replies)
{
- int ret = 0;
- afr_private_t *priv = NULL;
- int i = 0;
- unsigned char *newentry = NULL;
+ int ret = 0;
+ afr_private_t *priv = NULL;
+ int i = 0;
- priv = this->private;
+ priv = this->private;
- newentry = alloca0 (priv->child_count);
+ if (!replies[source].valid)
+ return -EIO;
- if (!replies[source].valid)
- return -EIO;
+ /* Skip healing this entry if the last lookup on it failed for reasons
+ * other than ENOENT.
+ */
+ if ((replies[source].op_ret < 0) && (replies[source].op_errno != ENOENT))
+ return -replies[source].op_errno;
- /* Skip healing this entry if the last lookup on it failed for reasons
- * other than ENOENT.
- */
- if ((replies[source].op_ret < 0) &&
- (replies[source].op_errno != ENOENT))
- return -replies[source].op_errno;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!healed_sinks[i])
- continue;
- if (replies[source].op_ret == -1 &&
- replies[source].op_errno == ENOENT) {
- ret = afr_selfheal_entry_delete (this, fd->inode, name,
- inode, i, replies);
- } else {
- if (!gf_uuid_compare (replies[i].poststat.ia_gfid,
- replies[source].poststat.ia_gfid))
- continue;
-
- ret = afr_selfheal_recreate_entry (this, i, source,
- fd->inode, name, inode,
- replies, newentry);
- }
- if (ret < 0)
- break;
- }
-
- if (AFR_COUNT (newentry, priv->child_count))
- afr_selfheal_newentry_mark (frame, this, inode, source, replies,
- sources, newentry);
- return ret;
+ if (replies[source].op_ret == 0) {
+ ret = afr_lookup_and_heal_gfid(this, fd->inode, name, inode, replies,
+ source, sources,
+ &replies[source].poststat.ia_gfid, NULL);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!healed_sinks[i])
+ continue;
+ if (replies[source].op_ret == -1 &&
+ replies[source].op_errno == ENOENT) {
+ ret = afr_selfheal_entry_delete(this, fd->inode, name, inode, i,
+ replies);
+ } else {
+ if (!gf_uuid_compare(replies[i].poststat.ia_gfid,
+ replies[source].poststat.ia_gfid))
+ continue;
+
+ ret = afr_selfheal_recreate_entry(frame, i, source, sources,
+ fd->inode, name, inode, replies);
+ }
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
}
static int
-afr_selfheal_detect_gfid_and_type_mismatch (xlator_t *this,
- struct afr_reply *replies,
- uuid_t pargfid, char *bname,
- int src_idx)
+afr_selfheal_detect_gfid_and_type_mismatch(xlator_t *this,
+ struct afr_reply *replies,
+ inode_t *inode, uuid_t pargfid,
+ char *bname, int src_idx,
+ unsigned char *locked_on, int *src)
{
- int i = 0;
- char g1[64] = {0,};
- char g2[64] = {0,};
- afr_private_t *priv = NULL;
+ int i = 0;
+ int ret = -1;
+ afr_private_t *priv = NULL;
+ void *gfid = NULL;
+ ia_type_t ia_type = IA_INVAL;
- priv = this->private;
+ priv = this->private;
+ gfid = &replies[src_idx].poststat.ia_gfid;
+ ia_type = replies[src_idx].poststat.ia_type;
- for (i = 0; i < priv->child_count; i++) {
- if (i == src_idx)
- continue;
-
- if (!replies[i].valid)
- continue;
-
- if (replies[i].op_ret != 0)
- continue;
-
- if (gf_uuid_compare (replies[src_idx].poststat.ia_gfid,
- replies[i].poststat.ia_gfid)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SPLIT_BRAIN, "Gfid mismatch "
- "detected for <%s/%s>, %s on %s and %s on %s. "
- "Skipping conservative merge on the file.",
- uuid_utoa (pargfid), bname,
- uuid_utoa_r (replies[i].poststat.ia_gfid, g1),
- priv->children[i]->name,
- uuid_utoa_r (replies[src_idx].poststat.ia_gfid,
- g2), priv->children[src_idx]->name);
- return -1;
- }
-
- if ((replies[src_idx].poststat.ia_type) !=
- (replies[i].poststat.ia_type)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SPLIT_BRAIN, "Type mismatch "
- "detected for <%s/%s>, %d on %s and %d on %s. "
- "Skipping conservative merge on the file.",
- uuid_utoa (pargfid), bname,
- replies[i].poststat.ia_type,
- priv->children[i]->name,
- replies[src_idx].poststat.ia_type,
- priv->children[src_idx]->name);
- return -1;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (i == src_idx)
+ continue;
+
+ if (!replies[i].valid)
+ continue;
+
+ if (replies[i].op_ret != 0)
+ continue;
+
+ if (gf_uuid_is_null(replies[i].poststat.ia_gfid))
+ continue;
+
+ if (replies[i].poststat.ia_type == IA_INVAL)
+ continue;
+
+ if (ia_type == IA_INVAL || gf_uuid_is_null(gfid)) {
+ src_idx = i;
+ ia_type = replies[src_idx].poststat.ia_type;
+ gfid = &replies[src_idx].poststat.ia_gfid;
+ continue;
}
- return 0;
+ if (gf_uuid_compare(gfid, replies[i].poststat.ia_gfid) &&
+ (ia_type == replies[i].poststat.ia_type)) {
+ ret = afr_gfid_split_brain_source(this, replies, inode, pargfid,
+ bname, src_idx, i, locked_on, src,
+ NULL);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "Skipping conservative merge on the "
+ "file.");
+ return ret;
+ }
+
+ if (ia_type != replies[i].poststat.ia_type) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+ "Type mismatch detected "
+ "for <gfid:%s>/%s>, %s on %s and %s on %s. "
+ "Skipping conservative merge on the file.",
+ uuid_utoa(pargfid), bname,
+ gf_inode_type_to_str(replies[i].poststat.ia_type),
+ priv->children[i]->name,
+ gf_inode_type_to_str(replies[src_idx].poststat.ia_type),
+ priv->children[src_idx]->name);
+ gf_event(EVENT_AFR_SPLIT_BRAIN,
+ "client-pid=%d;"
+ "subvol=%s;type=file;"
+ "file=<gfid:%s>/%s>;count=2;child-%d=%s;type-"
+ "%d=%s;child-%d=%s;type-%d=%s",
+ this->ctx->cmd_args.client_pid, this->name,
+ uuid_utoa(pargfid), bname, i, priv->children[i]->name, i,
+ gf_inode_type_to_str(replies[i].poststat.ia_type), src_idx,
+ priv->children[src_idx]->name, src_idx,
+ gf_inode_type_to_str(replies[src_idx].poststat.ia_type));
+ return -1;
+ }
+ }
+
+ return 0;
}
static int
-__afr_selfheal_merge_dirent (call_frame_t *frame, xlator_t *this, fd_t *fd,
- char *name, inode_t *inode, unsigned char *sources,
- unsigned char *healed_sinks, unsigned char *locked_on,
- struct afr_reply *replies)
+__afr_selfheal_merge_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ char *name, inode_t *inode, unsigned char *sources,
+ unsigned char *healed_sinks,
+ unsigned char *locked_on, struct afr_reply *replies)
{
- int ret = 0;
- int i = 0;
- int source = -1;
- unsigned char *newentry = NULL;
- afr_private_t *priv = NULL;
-
- priv = this->private;
-
- newentry = alloca0 (priv->child_count);
-
- for (i = 0; i < priv->child_count; i++) {
- if (replies[i].valid && replies[i].op_ret == 0) {
- source = i;
- break;
- }
- }
-
- if (source == -1) {
- /* entry got deleted in the mean time? */
- return 0;
- }
-
- /* Set all the sources as 1, otheriwse newentry_mark won't be set */
- for (i = 0; i < priv->child_count; i++) {
- if (replies[i].valid && replies[i].op_ret == 0) {
- sources[i] = 1;
- }
- }
-
- /* In case of a gfid or type mismatch on the entry, return -1.*/
- ret = afr_selfheal_detect_gfid_and_type_mismatch (this, replies,
- fd->inode->gfid,
- name, source);
+ int ret = 0;
+ int i = 0;
+ int source = -1;
+ int src = -1;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid && replies[i].op_ret == 0) {
+ source = i;
+ break;
+ }
+ }
- if (ret < 0)
- return ret;
+ if (source == -1) {
+ /* entry got deleted in the meantime? */
+ return 0;
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (i == source || !healed_sinks[i])
- continue;
+ /* Set all the sources to 1, otherwise newentry_mark won't be set */
+ for (i = 0; i < priv->child_count; i++) {
+ if (replies[i].valid && replies[i].op_ret == 0) {
+ sources[i] = 1;
+ }
+ }
+
+ ret = afr_lookup_and_heal_gfid(this, fd->inode, name, inode, replies,
+ source, sources,
+ &replies[source].poststat.ia_gfid, NULL);
+ if (ret)
+ return ret;
+
+ /* In case of a type mismatch, or if a gfid mismatch on the entry
+ * cannot be resolved, return -1. */
+ ret = afr_selfheal_detect_gfid_and_type_mismatch(
+ this, replies, inode, fd->inode->gfid, name, source, locked_on, &src);
+
+ if (ret < 0)
+ return ret;
+ if (src != -1) {
+ source = src;
+ for (i = 0; i < priv->child_count; i++) {
+ if (i != src && replies[i].valid &&
+ gf_uuid_compare(replies[src].poststat.ia_gfid,
+ replies[i].poststat.ia_gfid)) {
+ sources[i] = 0;
+ }
+ }
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (i == source || !healed_sinks[i])
+ continue;
+
+ if (src != -1) {
+ if (!gf_uuid_compare(replies[src].poststat.ia_gfid,
+ replies[i].poststat.ia_gfid))
+ continue;
+ } else if (replies[i].op_errno != ENOENT) {
+ continue;
+ }
- if (replies[i].op_errno != ENOENT)
- continue;
+ ret |= afr_selfheal_recreate_entry(frame, i, source, sources, fd->inode,
+ name, inode, replies);
+ }
- ret = afr_selfheal_recreate_entry (this, i, source, fd->inode,
- name, inode, replies,
- newentry);
- }
+ return ret;
+}
- if (AFR_COUNT (newentry, priv->child_count))
- afr_selfheal_newentry_mark (frame, this, inode, source, replies,
- sources, newentry);
- return ret;
+static int
+__afr_selfheal_entry_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ char *name, inode_t *inode, int source,
+ unsigned char *sources, unsigned char *healed_sinks,
+ unsigned char *locked_on, struct afr_reply *replies)
+{
+ int ret = -1;
+
+ if (source < 0)
+ ret = __afr_selfheal_merge_dirent(frame, this, fd, name, inode, sources,
+ healed_sinks, locked_on, replies);
+ else
+ ret = __afr_selfheal_heal_dirent(frame, this, fd, name, inode, source,
+ sources, healed_sinks, locked_on,
+ replies);
+ return ret;
}
+static gf_boolean_t
+is_full_heal_marker_present(xlator_t *this, dict_t *xdata, int idx)
+{
+ int i = 0;
+ int pending[3] = {
+ 0,
+ };
+ void *pending_raw = NULL;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (!xdata)
+ return _gf_false;
+
+ /* Iterate over each of the priv->pending_key[] elements and
+ * see if any of them has a non-zero data segment. If one does,
+ * return true; else return false.
+ */
+ for (i = 0; i < priv->child_count; i++) {
+ if (dict_get_ptr(xdata, priv->pending_key[i], &pending_raw))
+ continue;
+
+ if (!pending_raw)
+ continue;
+
+ memcpy(pending, pending_raw, sizeof(pending));
+ if (ntoh32(pending[idx]))
+ return _gf_true;
+ }
+
+ return _gf_false;
+}
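
is_full_heal_marker_present() above inspects the raw pending-xattr values: each value is three 32-bit counters in network byte order, and the marker counts as present when the counter at the requested index is non-zero. The sketch below decodes one such value standalone; the buffer contents are made up, and the data/metadata/entry ordering of the counters is assumed rather than taken from this patch.
```
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* pretend this came out of dict_get_ptr() for one pending key:
     * data = 2, metadata = 0, entry = 0, already in network order */
    unsigned char raw[12] = {0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0};
    uint32_t pending[3];
    int data_idx = 0; /* index assumed for the data changelog */

    memcpy(pending, raw, sizeof(pending));
    if (ntohl(pending[data_idx]))
        printf("full data heal required\n");
    else
        printf("no full-heal marker\n");
    return 0;
}
```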
-static int
-__afr_selfheal_entry_dirent (call_frame_t *frame, xlator_t *this, fd_t *fd,
- char *name, inode_t *inode, int source,
- unsigned char *sources, unsigned char *healed_sinks,
- unsigned char *locked_on,
- struct afr_reply *replies)
+static gf_boolean_t
+afr_need_full_heal(xlator_t *this, struct afr_reply *replies, int source,
+ unsigned char *healed_sinks, afr_transaction_type type)
{
- int ret = -1;
-
- if (source < 0)
- ret = __afr_selfheal_merge_dirent (frame, this, fd, name, inode,
- sources, healed_sinks,
- locked_on, replies);
- else
- ret = __afr_selfheal_heal_dirent (frame, this, fd, name, inode,
- source, sources, healed_sinks,
- locked_on, replies);
- return ret;
+ int i = 0;
+ int idx = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (!priv->esh_granular)
+ return _gf_true;
+
+ if (type != AFR_ENTRY_TRANSACTION)
+ return _gf_true;
+
+ priv = this->private;
+ idx = afr_index_for_transaction_type(AFR_DATA_TRANSACTION);
+
+ /* If there is a clear source, check whether the full-heal-indicator
+ * is present in its xdata. Otherwise, we need to examine all the
+ * participating bricks and check whether even one of them has a
+ * full-heal-indicator.
+ */
+
+ if (source != -1) {
+ if (is_full_heal_marker_present(this, replies[source].xdata, idx))
+ return _gf_true;
+ }
+
+ /* else ..*/
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!healed_sinks[i])
+ continue;
+
+ if (is_full_heal_marker_present(this, replies[i].xdata, idx))
+ return _gf_true;
+ }
+
+ return _gf_false;
}
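
Stripped of the transaction-type check and the per-brick iteration, the decision afr_need_full_heal() makes boils down to two questions: is granular entry self-heal enabled at all, and does any participating brick carry the full-heal indicator. A simplified standalone model of that decision (plain C, with the marker lookup reduced to a boolean) might look like this:

```
#include <stdbool.h>
#include <stdio.h>

/* Simplified model: a full directory crawl is needed unless granular
 * entry self-heal is enabled and no brick has the full-heal marker. */
static bool
need_full_heal(bool esh_granular, bool any_full_heal_marker)
{
    if (!esh_granular)
        return true;
    return any_full_heal_marker;
}

int
main(void)
{
    printf("granular disabled -> full heal: %d\n", need_full_heal(false, false));
    printf("marker present    -> full heal: %d\n", need_full_heal(true, true));
    printf("granular possible -> full heal: %d\n", need_full_heal(true, false));
    return 0;
}
```
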
static int
-__afr_selfheal_entry_finalize_source (xlator_t *this, unsigned char *sources,
- unsigned char *healed_sinks,
- unsigned char *locked_on,
- struct afr_reply *replies,
- uint64_t *witness)
+__afr_selfheal_entry_finalize_source(xlator_t *this, unsigned char *sources,
+ unsigned char *healed_sinks,
+ unsigned char *locked_on,
+ struct afr_reply *replies,
+ uint64_t *witness)
{
- afr_private_t *priv = NULL;
- int source = -1;
- int sources_count = 0;
+ afr_private_t *priv = NULL;
+ int source = -1;
+ int sources_count = 0;
+ int i = 0;
- priv = this->private;
+ priv = this->private;
- sources_count = AFR_COUNT (sources, priv->child_count);
+ sources_count = AFR_COUNT(sources, priv->child_count);
- if ((AFR_CMP (locked_on, healed_sinks, priv->child_count) == 0)
- || !sources_count || afr_does_witness_exist (this, witness)) {
+ if ((AFR_CMP(locked_on, healed_sinks, priv->child_count) == 0) ||
+ !sources_count || afr_does_witness_exist(this, witness)) {
+ memset(sources, 0, sizeof(*sources) * priv->child_count);
+ afr_mark_active_sinks(this, sources, locked_on, healed_sinks);
+ return -1;
+ }
- memset (sources, 0, sizeof (*sources) * priv->child_count);
- afr_mark_active_sinks (this, sources, locked_on, healed_sinks);
- return -1;
- }
+ source = afr_choose_source_by_policy(priv, sources, AFR_ENTRY_TRANSACTION);
- source = afr_choose_source_by_policy (priv, sources,
- AFR_ENTRY_TRANSACTION);
- return source;
+ /*If the selected source does not blame any other brick, then mark
+ * everything as sink to trigger conservative merge.
+ */
+ if (source != -1 && !AFR_COUNT(healed_sinks, priv->child_count)) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (locked_on[i]) {
+ sources[i] = 0;
+ healed_sinks[i] = 1;
+ }
+ }
+ return -1;
+ }
+
+ return source;
}
int
-__afr_selfheal_entry_prepare (call_frame_t *frame, xlator_t *this,
- inode_t *inode, unsigned char *locked_on,
- unsigned char *sources, unsigned char *sinks,
- unsigned char *healed_sinks,
- struct afr_reply *replies, int *source_p,
- gf_boolean_t *pflag)
+__afr_selfheal_entry_prepare(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, unsigned char *locked_on,
+ unsigned char *sources, unsigned char *sinks,
+ unsigned char *healed_sinks,
+ struct afr_reply *replies, int *source_p,
+ unsigned char *pflag)
{
- int ret = -1;
- int source = -1;
- afr_private_t *priv = NULL;
- uint64_t *witness = NULL;
-
- priv = this->private;
-
- ret = afr_selfheal_unlocked_discover (frame, inode, inode->gfid,
- replies);
- if (ret)
- return ret;
-
- witness = alloca0 (sizeof (*witness) * priv->child_count);
- ret = afr_selfheal_find_direction (frame, this, replies,
- AFR_ENTRY_TRANSACTION,
- locked_on, sources, sinks, witness,
- pflag);
- if (ret)
- return ret;
-
- /* Initialize the healed_sinks[] array optimistically to
- the intersection of to-be-healed (i.e sinks[]) and
- the list of servers which are up (i.e locked_on[]).
-
- As we encounter failures in the healing process, we
- will unmark the respective servers in the healed_sinks[]
- array.
- */
- AFR_INTERSECT (healed_sinks, sinks, locked_on, priv->child_count);
-
- source = __afr_selfheal_entry_finalize_source (this, sources,
- healed_sinks,
- locked_on, replies,
- witness);
-
- if (source < 0) {
- /* If source is < 0 (typically split-brain), we perform a
- conservative merge of entries rather than erroring out */
- }
- *source_p = source;
-
- return ret;
+ int ret = -1;
+ int source = -1;
+ afr_private_t *priv = NULL;
+ uint64_t *witness = NULL;
+
+ priv = this->private;
+
+ ret = afr_selfheal_unlocked_discover(frame, inode, inode->gfid, replies);
+ if (ret)
+ return ret;
+
+ witness = alloca0(sizeof(*witness) * priv->child_count);
+ ret = afr_selfheal_find_direction(frame, this, replies,
+ AFR_ENTRY_TRANSACTION, locked_on, sources,
+ sinks, witness, pflag);
+ if (ret)
+ return ret;
+
+ /* Initialize the healed_sinks[] array optimistically to
+ the intersection of to-be-healed (i.e sinks[]) and
+ the list of servers which are up (i.e locked_on[]).
+
+ As we encounter failures in the healing process, we
+ will unmark the respective servers in the healed_sinks[]
+ array.
+ */
+ AFR_INTERSECT(healed_sinks, sinks, locked_on, priv->child_count);
+
+ source = __afr_selfheal_entry_finalize_source(this, sources, healed_sinks,
+ locked_on, replies, witness);
+
+ if (source < 0) {
+ /* If source is < 0 (typically split-brain), we perform a
+ conservative merge of entries rather than erroring out */
+ }
+ *source_p = source;
+
+ return ret;
}
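
The comment above about initializing healed_sinks[] optimistically is the key invariant of the prepare step: a brick is considered healable only if it is both a sink and was successfully locked. As a minimal standalone illustration (plain C, not the AFR_INTERSECT macro itself), the intersection of the two boolean arrays looks like this:

```
#include <stdio.h>

/* Model of the healed_sinks[] initialization: keep only the bricks that
 * are marked as sinks and on which the entry lock was acquired. */
static void
intersect(unsigned char *out, const unsigned char *sinks,
          const unsigned char *locked_on, int child_count)
{
    int i;

    for (i = 0; i < child_count; i++)
        out[i] = sinks[i] && locked_on[i];
}

int
main(void)
{
    unsigned char sinks[] = { 1, 1, 0 };     /* bricks needing heal  */
    unsigned char locked_on[] = { 1, 0, 1 }; /* bricks we could lock */
    unsigned char healed_sinks[3];
    int i;

    intersect(healed_sinks, sinks, locked_on, 3);
    for (i = 0; i < 3; i++)
        printf("brick %d: healed_sink=%d\n", i, healed_sinks[i]);
    return 0;
}
```
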
static int
-afr_selfheal_entry_dirent (call_frame_t *frame, xlator_t *this,
- fd_t *fd, char *name)
+afr_selfheal_entry_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ char *name, inode_t *parent_idx_inode,
+ xlator_t *subvol, gf_boolean_t full_crawl)
{
- int ret = 0;
- int source = -1;
- unsigned char *locked_on = NULL;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *healed_sinks = NULL;
- inode_t *inode = NULL;
- struct afr_reply *replies = NULL;
- struct afr_reply *par_replies = NULL;
- afr_private_t *priv = NULL;
-
- priv = this->private;
-
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
- locked_on = alloca0 (priv->child_count);
-
- replies = alloca0 (priv->child_count * sizeof(*replies));
- par_replies = alloca0 (priv->child_count * sizeof(*par_replies));
-
- ret = afr_selfheal_entrylk (frame, this, fd->inode, this->name, NULL,
- locked_on);
- {
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- gf_msg_debug (this->name, 0, "%s: Skipping "
- "entry self-heal as only %d sub-volumes "
- " could be locked in %s domain",
- uuid_utoa (fd->inode->gfid),
- ret, this->name);
- ret = -ENOTCONN;
- goto unlock;
- }
-
- ret = __afr_selfheal_entry_prepare (frame, this, fd->inode,
- locked_on,
- sources, sinks,
- healed_sinks, par_replies,
- &source, NULL);
- if (ret < 0)
- goto unlock;
-
- inode = afr_selfheal_unlocked_lookup_on (frame, fd->inode, name,
- replies, locked_on,
- NULL);
- if (!inode) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = __afr_selfheal_entry_dirent (frame, this, fd, name, inode,
- source, sources, healed_sinks,
- locked_on, replies);
- }
+ int ret = 0;
+ int source = -1;
+ unsigned char *locked_on = NULL;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ unsigned char *healed_sinks = NULL;
+ inode_t *inode = NULL;
+ struct afr_reply *replies = NULL;
+ struct afr_reply *par_replies = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xattr = NULL;
+
+ priv = this->private;
+
+ if (afr_is_private_directory(priv, fd->inode->gfid, name,
+ GF_CLIENT_PID_SELF_HEALD)) {
+ return 0;
+ }
+
+ xattr = dict_new();
+ if (!xattr)
+ return -ENOMEM;
+ ret = dict_set_int32_sizen(xattr, GF_GFIDLESS_LOOKUP, 1);
+ if (ret) {
+ dict_unref(xattr);
+ return -1;
+ }
+
+ sources = alloca0(priv->child_count);
+ sinks = alloca0(priv->child_count);
+ healed_sinks = alloca0(priv->child_count);
+ locked_on = alloca0(priv->child_count);
+
+ replies = alloca0(priv->child_count * sizeof(*replies));
+ par_replies = alloca0(priv->child_count * sizeof(*par_replies));
+
+ ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name, NULL,
+ locked_on);
+ {
+ if (ret < priv->child_count) {
+ gf_msg_debug(this->name, 0,
+ "%s: Skipping "
+ "entry self-heal as only %d sub-volumes "
+ " could be locked in %s domain",
+ uuid_utoa(fd->inode->gfid), ret, this->name);
+ ret = -ENOTCONN;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_entry_prepare(frame, this, fd->inode, locked_on,
+ sources, sinks, healed_sinks,
+ par_replies, &source, NULL);
+ if (ret < 0)
+ goto unlock;
+
+ inode = afr_selfheal_unlocked_lookup_on(frame, fd->inode, name, replies,
+ locked_on, xattr);
+ if (!inode) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_entry_dirent(frame, this, fd, name, inode, source,
+ sources, healed_sinks, locked_on,
+ replies);
+
+ if ((ret == 0) && (priv->esh_granular) && parent_idx_inode) {
+ ret = afr_shd_entry_purge(subvol, parent_idx_inode, name,
+ inode->ia_type);
+ /* Why is ret force-set to 0? We do not care about
+ * index purge failing for full heal as it is quite
+ * possible during replace-brick that not all files
+ * and directories have their name indices present in
+ * entry-changes/.
+ */
+ ret = 0;
+ }
+ }
+
unlock:
- afr_selfheal_unentrylk (frame, this, fd->inode, this->name, NULL,
- locked_on);
- if (inode)
- inode_unref (inode);
- if (replies)
- afr_replies_wipe (replies, priv->child_count);
- if (par_replies)
- afr_replies_wipe (par_replies, priv->child_count);
-
- return ret;
+ afr_selfheal_unentrylk(frame, this, fd->inode, this->name, NULL, locked_on,
+ NULL);
+ if (inode)
+ inode_unref(inode);
+ if (replies)
+ afr_replies_wipe(replies, priv->child_count);
+ if (par_replies)
+ afr_replies_wipe(par_replies, priv->child_count);
+ if (xattr)
+ dict_unref(xattr);
+
+ return ret;
}
+static inode_t *
+afr_shd_entry_changes_index_inode(xlator_t *this, xlator_t *subvol,
+ uuid_t pargfid)
+{
+ int ret = -1;
+ void *index_gfid = NULL;
+ loc_t rootloc = {
+ 0,
+ };
+ loc_t loc = {
+ 0,
+ };
+ dict_t *xattr = NULL;
+ inode_t *inode = NULL;
+ struct iatt iatt = {
+ 0,
+ };
+
+ rootloc.inode = inode_ref(this->itable->root);
+ gf_uuid_copy(rootloc.gfid, rootloc.inode->gfid);
+
+ ret = syncop_getxattr(subvol, &rootloc, &xattr,
+ GF_XATTROP_ENTRY_CHANGES_GFID, NULL, NULL);
+ if (ret || !xattr) {
+ errno = -ret;
+ goto out;
+ }
+
+ ret = dict_get_ptr(xattr, GF_XATTROP_ENTRY_CHANGES_GFID, &index_gfid);
+ if (ret) {
+ errno = EINVAL;
+ goto out;
+ }
+
+ loc.inode = inode_new(this->itable);
+ if (!loc.inode) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ gf_uuid_copy(loc.pargfid, index_gfid);
+ loc.name = gf_strdup(uuid_utoa(pargfid));
+
+ ret = syncop_lookup(subvol, &loc, &iatt, NULL, NULL, NULL);
+ if (ret < 0) {
+ errno = -ret;
+ goto out;
+ }
+
+ inode = inode_link(loc.inode, NULL, NULL, &iatt);
+
+out:
+ if (xattr)
+ dict_unref(xattr);
+ loc_wipe(&rootloc);
+ GF_FREE((char *)loc.name);
+ loc_wipe(&loc);
+
+ return inode;
+}
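
afr_shd_entry_changes_index_inode() resolves the per-directory granular index by gfid: it fetches the gfid of the entry-changes directory through a virtual getxattr on the brick root and then looks up an entry named after the parent directory's gfid under it. On disk this corresponds, as an assumption for illustration (the real code never builds the path by hand), to <brick>/.glusterfs/indices/entry-changes/<parent-gfid>. The sketch below only shows how such an index name would be rendered from a raw 16-byte gfid; the gfid value is made up.

```
#include <stdint.h>
#include <stdio.h>

/* Render a 16-byte gfid in the usual 8-4-4-4-12 uuid form, the shape the
 * name index directory under entry-changes/ is expected to have. */
static void
format_gfid(const uint8_t gfid[16], char out[37])
{
    snprintf(out, 37,
             "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
             "%02x%02x%02x%02x%02x%02x",
             gfid[0], gfid[1], gfid[2], gfid[3], gfid[4], gfid[5], gfid[6],
             gfid[7], gfid[8], gfid[9], gfid[10], gfid[11], gfid[12],
             gfid[13], gfid[14], gfid[15]);
}

int
main(void)
{
    uint8_t pargfid[16] = { 0xde, 0xad, 0xbe, 0xef }; /* illustrative only */
    char name[37];

    format_gfid(pargfid, name);
    printf(".glusterfs/indices/entry-changes/%s\n", name);
    return 0;
}
```
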
static int
-afr_selfheal_entry_do_subvol (call_frame_t *frame, xlator_t *this,
- fd_t *fd, int child)
+afr_selfheal_entry_do_subvol(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int child)
{
- int ret = 0;
- gf_dirent_t entries;
- gf_dirent_t *entry = NULL;
- off_t offset = 0;
- call_frame_t *iter_frame = NULL;
- xlator_t *subvol = NULL;
- afr_private_t *priv = NULL;
- gf_boolean_t mismatch = _gf_false;
-
- priv = this->private;
- subvol = priv->children[child];
-
- INIT_LIST_HEAD (&entries.list);
-
- iter_frame = afr_copy_frame (frame);
- if (!iter_frame)
- return -ENOMEM;
-
- while ((ret = syncop_readdir (subvol, fd, 131072, offset, &entries,
- NULL, NULL))) {
- if (ret > 0)
- ret = 0;
- list_for_each_entry (entry, &entries.list, list) {
- offset = entry->d_off;
-
- if (!strcmp (entry->d_name, ".") ||
- !strcmp (entry->d_name, ".."))
- continue;
-
- if (__is_root_gfid (fd->inode->gfid) &&
- !strcmp (entry->d_name, GF_REPLICATE_TRASH_DIR))
- continue;
-
- ret = afr_selfheal_entry_dirent (iter_frame, this, fd,
- entry->d_name);
- AFR_STACK_RESET (iter_frame);
- if (iter_frame->local == NULL) {
- ret = -ENOTCONN;
- break;
- }
-
- if (ret == -1) {
- /* gfid or type mismatch. */
- mismatch = _gf_true;
- ret = 0;
- }
- if (ret)
- break;
- }
-
- gf_dirent_free (&entries);
- if (ret)
- break;
- }
-
- AFR_STACK_DESTROY (iter_frame);
- if (mismatch == _gf_true)
- /* undo pending will be skipped */
- ret = -1;
- return ret;
+ int ret = 0;
+ gf_dirent_t entries;
+ gf_dirent_t *entry = NULL;
+ off_t offset = 0;
+ call_frame_t *iter_frame = NULL;
+ xlator_t *subvol = NULL;
+ afr_private_t *priv = NULL;
+ gf_boolean_t mismatch = _gf_false;
+ afr_local_t *local = NULL;
+ loc_t loc = {
+ 0,
+ };
+
+ priv = this->private;
+ subvol = priv->children[child];
+
+ INIT_LIST_HEAD(&entries.list);
+
+ local = frame->local;
+
+ iter_frame = afr_copy_frame(frame);
+ if (!iter_frame)
+ return -ENOMEM;
+
+ loc.inode = afr_shd_entry_changes_index_inode(this, subvol,
+ fd->inode->gfid);
+
+ while ((ret = syncop_readdir(subvol, fd, 131072, offset, &entries, NULL,
+ NULL))) {
+ if (ret > 0)
+ ret = 0;
+ list_for_each_entry(entry, &entries.list, list)
+ {
+ offset = entry->d_off;
+
+ if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
+ continue;
+
+ ret = afr_selfheal_entry_dirent(iter_frame, this, fd, entry->d_name,
+ loc.inode, subvol,
+ local->need_full_crawl);
+ AFR_STACK_RESET(iter_frame);
+ if (iter_frame->local == NULL) {
+ ret = -ENOTCONN;
+ break;
+ }
+
+ if (ret == -1) {
+ /* gfid or type mismatch. */
+ mismatch = _gf_true;
+ ret = 0;
+ }
+ if (ret)
+ break;
+ }
+
+ gf_dirent_free(&entries);
+ if (ret)
+ break;
+ }
+
+ loc_wipe(&loc);
+
+ AFR_STACK_DESTROY(iter_frame);
+ if (mismatch == _gf_true)
+ /* undo pending will be skipped */
+ ret = -1;
+ return ret;
}
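
afr_selfheal_entry_do_subvol() is essentially a paginated readdir crawl that heals every name it encounters, skipping "." and "..". A rough standalone analogue using POSIX opendir()/readdir() rather than the syncop API, with the per-entry heal replaced by a stub, conveys the same shape:

```
#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* Stub standing in for the per-name heal; returns 0 on success. */
static int
heal_one_entry(const char *dirpath, const char *name)
{
    printf("would heal %s/%s\n", dirpath, name);
    return 0;
}

/* Crawl @dirpath and invoke the heal stub for every real entry,
 * mirroring the skip of "." and ".." in the loop above. */
static int
crawl_and_heal(const char *dirpath)
{
    DIR *dir = opendir(dirpath);
    struct dirent *entry;
    int ret = 0;

    if (!dir)
        return -1;

    while ((entry = readdir(dir)) != NULL) {
        if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
            continue;
        ret = heal_one_entry(dirpath, entry->d_name);
        if (ret)
            break;
    }
    closedir(dir);
    return ret;
}

int
main(void)
{
    return crawl_and_heal(".");
}
```
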
static int
-afr_selfheal_entry_do (call_frame_t *frame, xlator_t *this, fd_t *fd,
- int source, unsigned char *sources,
- unsigned char *healed_sinks)
+afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
+ loc_t *parent, void *data)
{
- int i = 0;
- afr_private_t *priv = NULL;
- gf_boolean_t mismatch = _gf_false;
- int ret = 0;
-
- priv = this->private;
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_SELF_HEAL_INFO, "performing entry selfheal on %s",
- uuid_utoa (fd->inode->gfid));
-
- for (i = 0; i < priv->child_count; i++) {
- if (!healed_sinks[i])
- continue;
- ret = afr_selfheal_entry_do_subvol (frame, this, fd, i);
- if (ret == -1) {
- /* gfid or type mismatch. */
- mismatch = _gf_true;
- ret = 0;
- }
- if (ret)
- break;
- }
- if (!ret && source != -1)
- ret = afr_selfheal_entry_do_subvol (frame, this, fd, source);
-
- if (mismatch == _gf_true)
- /* undo pending will be skipped */
- ret = -1;
- return ret;
+ int ret = 0;
+ loc_t loc = {
+ 0,
+ };
+ struct iatt iatt = {
+ 0,
+ };
+ afr_granular_esh_args_t *args = data;
+
+ /* Look up the actual inode associated with entry. If the lookup returns
+ * ESTALE or ENOENT, then it means we have a stale index. Remove it.
+ * This is analogous to the check in afr_shd_index_heal() except that
+ * here it is achieved through LOOKUP and in afr_shd_index_heal() through
+ * a GETXATTR.
+ */
+
+ loc.inode = inode_new(args->xl->itable);
+ loc.parent = inode_ref(args->heal_fd->inode);
+ gf_uuid_copy(loc.pargfid, loc.parent->gfid);
+ loc.name = entry->d_name;
+
+ ret = syncop_lookup(args->xl, &loc, &iatt, NULL, NULL, NULL);
+ if ((ret == -ENOENT) || (ret == -ESTALE)) {
+ /* The name indices under the pgfid index dir are guaranteed
+ * to be regular files. Hence the hardcoding.
+ */
+ afr_shd_entry_purge(subvol, parent->inode, entry->d_name, IA_IFREG);
+ ret = 0;
+ goto out;
+ }
+ /* TBD: afr_shd_zero_xattrop? */
+
+ ret = afr_selfheal_entry_dirent(args->frame, args->xl, args->heal_fd,
+ entry->d_name, parent->inode, subvol,
+ _gf_false);
+ AFR_STACK_RESET(args->frame);
+ if (args->frame->local == NULL)
+ ret = -ENOTCONN;
+
+ if (ret == -1)
+ args->mismatch = _gf_true;
+
+out:
+ loc_wipe(&loc);
+ return 0;
}
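
The stale-index handling in afr_selfheal_entry_granular_dirent() reduces to one decision: if the name no longer resolves anywhere (lookup fails with ENOENT or ESTALE), purge the index entry and move on; otherwise heal the name. Below is a standalone decision model with the brick operations stubbed out; the stubs are placeholders, not GlusterFS calls.

```
#include <errno.h>
#include <stdio.h>

/* Placeholder stubs for name resolution, index purge and name heal. */
static int
lookup_name(const char *name)
{
    (void)name;
    return -ENOENT; /* pretend the name is gone, i.e. the index is stale */
}

static void
purge_index(const char *name)
{
    printf("purging stale index entry for %s\n", name);
}

static int
heal_name(const char *name)
{
    printf("healing name %s\n", name);
    return 0;
}

/* Mirror of the ENOENT/ESTALE branch in the dirent callback above. */
static int
process_name_index(const char *name)
{
    int ret = lookup_name(name);

    if (ret == -ENOENT || ret == -ESTALE) {
        purge_index(name);
        return 0;
    }
    return heal_name(name);
}

int
main(void)
{
    return process_name_index("lost-file.txt");
}
```
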
static int
-__afr_selfheal_entry (call_frame_t *frame, xlator_t *this, fd_t *fd,
- unsigned char *locked_on)
+afr_selfheal_entry_granular(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int subvol_idx, gf_boolean_t is_src)
{
- int ret = -1;
- int source = -1;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *data_lock = NULL;
- unsigned char *postop_lock = NULL;
- unsigned char *healed_sinks = NULL;
- struct afr_reply *locked_replies = NULL;
- afr_private_t *priv = NULL;
- gf_boolean_t did_sh = _gf_true;
-
- priv = this->private;
-
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
- data_lock = alloca0 (priv->child_count);
- postop_lock = alloca0 (priv->child_count);
-
- locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count);
-
- ret = afr_selfheal_entrylk (frame, this, fd->inode, this->name, NULL,
- data_lock);
- {
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- gf_msg_debug (this->name, 0, "%s: Skipping "
- "entry self-heal as only %d sub-volumes could "
- "be locked in %s domain",
- uuid_utoa (fd->inode->gfid), ret,
- this->name);
- ret = -ENOTCONN;
- goto unlock;
- }
-
- ret = __afr_selfheal_entry_prepare (frame, this, fd->inode,
- data_lock, sources, sinks,
- healed_sinks,
- locked_replies, &source,
- NULL);
- if (AFR_COUNT(healed_sinks, priv->child_count) == 0) {
- did_sh = _gf_false;
- goto unlock;
- }
- }
-unlock:
- afr_selfheal_unentrylk (frame, this, fd->inode, this->name, NULL,
- data_lock);
- if (ret < 0)
- goto out;
+ int ret = 0;
+ loc_t loc = {
+ 0,
+ };
+ xlator_t *subvol = NULL;
+ afr_private_t *priv = NULL;
+ afr_granular_esh_args_t args = {
+ 0,
+ };
+
+ priv = this->private;
+ subvol = priv->children[subvol_idx];
+
+ args.frame = afr_copy_frame(frame);
+ if (!args.frame)
+ goto out;
+ args.xl = this;
+ /* args.heal_fd represents the fd associated with the original directory
+ * on which entry heal is being attempted.
+ */
+ args.heal_fd = fd;
+
+ /* @subvol here represents the subvolume of AFR where
+ * indices/entry-changes/<pargfid> will be processed
+ */
+ loc.inode = afr_shd_entry_changes_index_inode(this, subvol,
+ fd->inode->gfid);
+ if (!loc.inode) {
+ /* If granular heal fails on the sink (which can happen, since it
+ * is usually the src that contains the granular changelogs while
+ * the sink's entry-changes index may be empty), do not treat the
+ * heal as a failure.
+ */
+ if (is_src)
+ ret = -errno;
+ else
+ ret = 0;
+ goto out;
+ }
- if (!did_sh)
- goto out;
+ ret = syncop_dir_scan(subvol, &loc, GF_CLIENT_PID_SELF_HEALD, &args,
+ afr_selfheal_entry_granular_dirent);
- ret = afr_selfheal_entry_do (frame, this, fd, source, sources,
- healed_sinks);
- if (ret)
- goto out;
-
- /* Take entrylks in xlator domain before doing post-op (undo-pending) in
- * entry self-heal. This is to prevent a parallel name self-heal on
- * an entry under @fd->inode from reading pending xattrs while it is
- * being modified by SHD after entry sh below, given that
- * name self-heal takes locks ONLY in xlator domain and is free to read
- * pending changelog in the absence of the following locking.
- */
- ret = afr_selfheal_entrylk (frame, this, fd->inode, this->name, NULL,
- postop_lock);
- {
- if (AFR_CMP (data_lock, postop_lock, priv->child_count) != 0) {
- gf_msg_debug (this->name, 0, "%s: Skipping "
- "post-op after entry self-heal as %d "
- "sub-volumes, as opposed to %d, "
- "could be locked in %s domain",
- uuid_utoa (fd->inode->gfid),
- ret, AFR_COUNT (data_lock,
- priv->child_count), this->name);
- ret = -ENOTCONN;
- goto postop_unlock;
- }
-
- ret = afr_selfheal_undo_pending (frame, this, fd->inode,
- sources, sinks, healed_sinks,
- AFR_ENTRY_TRANSACTION,
- locked_replies, postop_lock);
- }
-postop_unlock:
- afr_selfheal_unentrylk (frame, this, fd->inode, this->name, NULL,
- postop_lock);
+ loc_wipe(&loc);
+
+ if (args.mismatch == _gf_true)
+ ret = -1;
out:
- if (did_sh)
- afr_log_selfheal (fd->inode->gfid, this, ret, "entry", source,
- healed_sinks);
+ if (args.frame)
+ AFR_STACK_DESTROY(args.frame);
+ return ret;
+}
+
+static int
+afr_selfheal_entry_do(call_frame_t *frame, xlator_t *this, fd_t *fd, int source,
+ unsigned char *sources, unsigned char *healed_sinks)
+{
+ int i = 0;
+ int ret = 0;
+ gf_boolean_t mismatch = _gf_false;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
+ "performing entry selfheal on %s", uuid_utoa(fd->inode->gfid));
+
+ for (i = 0; i < priv->child_count; i++) {
+ /* Expunge */
+ if (!healed_sinks[i])
+ continue;
+
+ if (!local->need_full_crawl)
+ /* Why call afr_selfheal_entry_granular() on a "healed sink",
+ * given that it is the source that contains the granular
+ * indices?
+ * If the index for this directory is non-existent or empty on
+ * this subvol (=> clear sink), then it will return early
+ * without a failure status.
+ * If the index is non-empty and this is still a 'healed sink',
+ * then we are in a split-brain, in which case we need to crawl
+ * the indices/entry-changes/pargfid directory anyway.
+ */
+ ret = afr_selfheal_entry_granular(frame, this, fd, i, _gf_false);
else
- ret = 1;
+ ret = afr_selfheal_entry_do_subvol(frame, this, fd, i);
- if (locked_replies)
- afr_replies_wipe (locked_replies, priv->child_count);
- return ret;
+ if (ret == -1) {
+ /* gfid or type mismatch. */
+ mismatch = _gf_true;
+ ret = 0;
+ }
+ if (ret)
+ break;
+ }
+
+ if (!ret && source != -1) {
+ /* Impunge */
+ if (local->need_full_crawl)
+ ret = afr_selfheal_entry_do_subvol(frame, this, fd, source);
+ else
+ ret = afr_selfheal_entry_granular(frame, this, fd, source,
+ _gf_true);
+ }
+
+ if (mismatch == _gf_true)
+ /* undo pending will be skipped */
+ ret = -1;
+ return ret;
}
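
afr_selfheal_entry_do() chooses a crawl strategy twice: once per healed sink for the expunge pass and once for the source for the impunge pass, switching between the granular index crawl and the full readdir crawl based on need_full_crawl. The standalone sketch below keeps only that dispatch logic, with both crawl strategies stubbed out:

```
#include <stdbool.h>
#include <stdio.h>

/* Stubs for the two crawl strategies used by the patch above. */
static int
heal_granular(int child)
{
    printf("granular index crawl of child %d\n", child);
    return 0;
}

static int
heal_full(int child)
{
    printf("full readdir crawl of child %d\n", child);
    return 0;
}

/* Dispatch model: sinks are processed first (expunge), then the source
 * (impunge); need_full_crawl mirrors local->need_full_crawl. */
static int
entry_heal_dispatch(int child_count, int source, const bool *healed_sinks,
                    bool need_full_crawl)
{
    int ret = 0;
    int i;

    for (i = 0; i < child_count; i++) {
        if (!healed_sinks[i])
            continue;
        ret = need_full_crawl ? heal_full(i) : heal_granular(i);
        if (ret)
            return ret;
    }
    if (source != -1)
        ret = need_full_crawl ? heal_full(source) : heal_granular(source);
    return ret;
}

int
main(void)
{
    bool healed_sinks[] = { false, true, true };

    return entry_heal_dispatch(3, 0, healed_sinks, false);
}
```
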
+static int
+__afr_selfheal_entry(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ unsigned char *locked_on)
+{
+ int ret = -1;
+ int source = -1;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ unsigned char *data_lock = NULL;
+ unsigned char *postop_lock = NULL;
+ unsigned char *healed_sinks = NULL;
+ unsigned char *undid_pending = NULL;
+ struct afr_reply *locked_replies = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ gf_boolean_t did_sh = _gf_true;
+
+ priv = this->private;
+ local = frame->local;
+
+ sources = alloca0(priv->child_count);
+ sinks = alloca0(priv->child_count);
+ healed_sinks = alloca0(priv->child_count);
+ undid_pending = alloca0(priv->child_count);
+ data_lock = alloca0(priv->child_count);
+ postop_lock = alloca0(priv->child_count);
+
+ locked_replies = alloca0(sizeof(*locked_replies) * priv->child_count);
+
+ ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name, NULL,
+ data_lock);
+ {
+ if (ret < priv->child_count) {
+ gf_msg_debug(this->name, 0,
+ "%s: Skipping "
+ "entry self-heal as only %d sub-volumes could "
+ "be locked in %s domain",
+ uuid_utoa(fd->inode->gfid), ret, this->name);
+ ret = -ENOTCONN;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_entry_prepare(frame, this, fd->inode, data_lock,
+ sources, sinks, healed_sinks,
+ locked_replies, &source, NULL);
+ if (AFR_COUNT(healed_sinks, priv->child_count) == 0) {
+ did_sh = _gf_false;
+ goto unlock;
+ }
+
+ local->need_full_crawl = afr_need_full_heal(
+ this, locked_replies, source, healed_sinks, AFR_ENTRY_TRANSACTION);
+ }
+unlock:
+ afr_selfheal_unentrylk(frame, this, fd->inode, this->name, NULL, data_lock,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ if (!did_sh)
+ goto out;
+
+ ret = afr_selfheal_entry_do(frame, this, fd, source, sources, healed_sinks);
+ if (ret)
+ goto out;
+
+ /* Take entrylks in xlator domain before doing post-op (undo-pending) in
+ * entry self-heal. This is to prevent a parallel name self-heal on
+ * an entry under @fd->inode from reading pending xattrs while it is
+ * being modified by SHD after entry sh below, given that
+ * name self-heal takes locks ONLY in xlator domain and is free to read
+ * pending changelog in the absence of the following locking.
+ */
+ ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name, NULL,
+ postop_lock);
+ {
+ if (AFR_CMP(data_lock, postop_lock, priv->child_count) != 0) {
+ gf_msg_debug(this->name, 0,
+ "%s: Skipping "
+ "post-op after entry self-heal as %d "
+ "sub-volumes, as opposed to %d, "
+ "could be locked in %s domain",
+ uuid_utoa(fd->inode->gfid), ret,
+ AFR_COUNT(data_lock, priv->child_count), this->name);
+ ret = -ENOTCONN;
+ goto postop_unlock;
+ }
+
+ afr_selfheal_restore_time(frame, this, fd->inode, source, healed_sinks,
+ locked_replies);
+ ret = afr_selfheal_undo_pending(
+ frame, this, fd->inode, sources, sinks, healed_sinks, undid_pending,
+ AFR_ENTRY_TRANSACTION, locked_replies, postop_lock);
+ }
+postop_unlock:
+ afr_selfheal_unentrylk(frame, this, fd->inode, this->name, NULL,
+ postop_lock, NULL);
+out:
+ if (did_sh)
+ afr_log_selfheal(fd->inode->gfid, this, ret, "entry", source, sources,
+ healed_sinks);
+ else
+ ret = 1;
+
+ if (locked_replies)
+ afr_replies_wipe(locked_replies, priv->child_count);
+ return ret;
+}
static fd_t *
-afr_selfheal_data_opendir (xlator_t *this, inode_t *inode)
+afr_selfheal_data_opendir(xlator_t *this, inode_t *inode)
{
- loc_t loc = {0,};
- int ret = 0;
- fd_t *fd = NULL;
-
- fd = fd_create (inode, 0);
- if (!fd)
- return NULL;
-
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
-
- ret = syncop_opendir (this, &loc, fd, NULL, NULL);
- if (ret) {
- fd_unref (fd);
- fd = NULL;
- } else {
- fd_bind (fd);
- }
-
- loc_wipe (&loc);
- return fd;
+ loc_t loc = {
+ 0,
+ };
+ int ret = 0;
+ fd_t *fd = NULL;
+
+ fd = fd_create(inode, 0);
+ if (!fd)
+ return NULL;
+
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
+
+ ret = syncop_opendir(this, &loc, fd, NULL, NULL);
+ if (ret) {
+ fd_unref(fd);
+ fd = NULL;
+ } else {
+ fd_bind(fd);
+ }
+
+ loc_wipe(&loc);
+ return fd;
}
-
int
-afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)
+afr_selfheal_entry(call_frame_t *frame, xlator_t *this, inode_t *inode)
{
- afr_private_t *priv = NULL;
- unsigned char *locked_on = NULL;
- unsigned char *long_name_locked = NULL;
- fd_t *fd = NULL;
- int ret = 0;
-
- priv = this->private;
-
- fd = afr_selfheal_data_opendir (this, inode);
- if (!fd)
- return -EIO;
-
- locked_on = alloca0 (priv->child_count);
- long_name_locked = alloca0 (priv->child_count);
-
- ret = afr_selfheal_tryentrylk (frame, this, inode, priv->sh_domain, NULL,
- locked_on);
- {
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- gf_msg_debug (this->name, 0, "%s: Skipping "
- "entry self-heal as only %d sub-volumes could "
- "be locked in %s domain",
- uuid_utoa (fd->inode->gfid), ret,
- priv->sh_domain);
- /* Either less than two subvols available, or another
- selfheal (from another server) is in progress. Skip
- for now in any case there isn't anything to do.
- */
- ret = -ENOTCONN;
- goto unlock;
- }
-
- ret = afr_selfheal_tryentrylk (frame, this, inode, this->name,
- LONG_FILENAME, long_name_locked);
- {
- if (ret < 1) {
- gf_msg_debug (this->name, 0, "%s: Skipping"
- " entry self-heal as only %d "
- "sub-volumes could be "
- "locked in special-filename "
- "domain",
- uuid_utoa (fd->inode->gfid),
- ret);
- ret = -ENOTCONN;
- goto unlock;
- }
- ret = __afr_selfheal_entry (frame, this, fd, locked_on);
- }
- afr_selfheal_unentrylk (frame, this, inode, this->name,
- LONG_FILENAME, long_name_locked);
- }
+ afr_private_t *priv = NULL;
+ unsigned char *locked_on = NULL;
+ fd_t *fd = NULL;
+ int ret = 0;
+
+ priv = this->private;
+
+ fd = afr_selfheal_data_opendir(this, inode);
+ if (!fd)
+ return -EIO;
+
+ locked_on = alloca0(priv->child_count);
+
+ ret = afr_selfheal_tie_breaker_entrylk(frame, this, inode, priv->sh_domain,
+ NULL, locked_on);
+ {
+ if (ret < priv->child_count) {
+ gf_msg_debug(this->name, 0,
+ "%s: Skipping "
+ "entry self-heal as only %d sub-volumes could "
+ "be locked in %s domain",
+ uuid_utoa(fd->inode->gfid), ret, priv->sh_domain);
+ /* Either fewer than two subvols are available, or another
+ selfheal (from another server) is in progress. Skip for
+ now; in either case there isn't anything to do.
+ */
+ ret = -ENOTCONN;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_entry(frame, this, fd, locked_on);
+ }
unlock:
- afr_selfheal_unentrylk (frame, this, inode, priv->sh_domain, NULL, locked_on);
+ afr_selfheal_unentrylk(frame, this, inode, priv->sh_domain, NULL, locked_on,
+ NULL);
- if (fd)
- fd_unref (fd);
+ if (fd)
+ fd_unref(fd);
- return ret;
+ return ret;
}
diff --git a/xlators/cluster/afr/src/afr-self-heal-metadata.c b/xlators/cluster/afr/src/afr-self-heal-metadata.c
index 65c25abcb4a..03f43bad16e 100644
--- a/xlators/cluster/afr/src/afr-self-heal-metadata.c
+++ b/xlators/cluster/afr/src/afr-self-heal-metadata.c
@@ -8,106 +8,108 @@
cases as published by the Free Software Foundation.
*/
-
#include "afr.h"
#include "afr-self-heal.h"
-#include "byte-order.h"
+#include <glusterfs/byte-order.h>
#include "protocol-common.h"
+#include <glusterfs/events.h>
-#define AFR_HEAL_ATTR (GF_SET_ATTR_UID|GF_SET_ATTR_GID|GF_SET_ATTR_MODE)
+#define AFR_HEAL_ATTR (GF_SET_ATTR_UID | GF_SET_ATTR_GID | GF_SET_ATTR_MODE)
static gf_boolean_t
-_afr_ignorable_key_match (dict_t *d, char *k, data_t *val, void *mdata)
+_afr_ignorable_key_match(dict_t *d, char *k, data_t *val, void *mdata)
{
- return afr_is_xattr_ignorable (k);
+ return afr_is_xattr_ignorable(k);
}
void
-afr_delete_ignorable_xattrs (dict_t *xattr)
+afr_delete_ignorable_xattrs(dict_t *xattr)
{
- dict_foreach_match (xattr, _afr_ignorable_key_match, NULL,
- dict_remove_foreach_fn, NULL);
+ dict_foreach_match(xattr, _afr_ignorable_key_match, NULL,
+ dict_remove_foreach_fn, NULL);
}
int
-__afr_selfheal_metadata_do (call_frame_t *frame, xlator_t *this, inode_t *inode,
- int source, unsigned char *healed_sinks,
- struct afr_reply *locked_replies)
+__afr_selfheal_metadata_do(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ int source, unsigned char *healed_sinks,
+ struct afr_reply *locked_replies)
{
- int ret = -1;
- loc_t loc = {0,};
- dict_t *xattr = NULL;
- dict_t *old_xattr = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
-
- priv = this->private;
-
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.gfid, inode->gfid);
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_SELF_HEAL_INFO, "performing metadata selfheal on %s",
- uuid_utoa (inode->gfid));
-
- ret = syncop_getxattr (priv->children[source], &loc, &xattr, NULL,
- NULL, NULL);
- if (ret < 0) {
- ret = -EIO;
- goto out;
- }
-
- afr_delete_ignorable_xattrs (xattr);
-
- for (i = 0; i < priv->child_count; i++) {
- if (old_xattr) {
- dict_unref (old_xattr);
- old_xattr = NULL;
- }
-
- if (!healed_sinks[i])
- continue;
-
- ret = syncop_setattr (priv->children[i], &loc,
- &locked_replies[source].poststat,
- AFR_HEAL_ATTR, NULL, NULL, NULL, NULL);
- if (ret)
- healed_sinks[i] = 0;
-
- ret = syncop_getxattr (priv->children[i], &loc, &old_xattr, 0,
- NULL, NULL);
- if (old_xattr) {
- afr_delete_ignorable_xattrs (old_xattr);
- ret = syncop_removexattr (priv->children[i], &loc, "",
- old_xattr, NULL);
- }
-
- ret = syncop_setxattr (priv->children[i], &loc, xattr, 0, NULL,
- NULL);
- if (ret)
- healed_sinks[i] = 0;
- }
- ret = 0;
+ int ret = -1;
+ loc_t loc = {
+ 0,
+ };
+ dict_t *xattr = NULL;
+ dict_t *old_xattr = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+
+ priv = this->private;
+
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
+ "performing metadata selfheal on %s", uuid_utoa(inode->gfid));
+
+ ret = syncop_getxattr(priv->children[source], &loc, &xattr, NULL, NULL,
+ NULL);
+ if (ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ afr_delete_ignorable_xattrs(xattr);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (old_xattr) {
+ dict_unref(old_xattr);
+ old_xattr = NULL;
+ }
+
+ if (!healed_sinks[i])
+ continue;
+
+ ret = syncop_setattr(priv->children[i], &loc,
+ &locked_replies[source].poststat, AFR_HEAL_ATTR,
+ NULL, NULL, NULL, NULL);
+ if (ret)
+ healed_sinks[i] = 0;
+
+ ret = syncop_getxattr(priv->children[i], &loc, &old_xattr, 0, NULL,
+ NULL);
+ if (old_xattr) {
+ afr_delete_ignorable_xattrs(old_xattr);
+ ret = syncop_removexattr(priv->children[i], &loc, "", old_xattr,
+ NULL);
+ if (ret)
+ healed_sinks[i] = 0;
+ }
+
+ ret = syncop_setxattr(priv->children[i], &loc, xattr, 0, NULL, NULL);
+ if (ret)
+ healed_sinks[i] = 0;
+ }
+ ret = 0;
out:
- loc_wipe (&loc);
- if (xattr)
- dict_unref (xattr);
- if (old_xattr)
- dict_unref (old_xattr);
+ loc_wipe(&loc);
+ if (xattr)
+ dict_unref(xattr);
+ if (old_xattr)
+ dict_unref(old_xattr);
- return ret;
+ return ret;
}
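
__afr_selfheal_metadata_do() fixes a sink in three steps: copy ownership and mode from the source with setattr, delete the sink's own non-ignorable xattrs, then copy the source's xattrs over. A sink that fails a step is dropped from healed_sinks[] so undo-pending will not clear its changelog later. The following is a deliberately simplified ordering model with all brick operations stubbed out; unlike the real code it stops at the first failing step for a given sink.

```
#include <stdio.h>

/* Placeholder stubs for the three per-sink operations. */
static int
copy_attrs_from_source(int sink)
{
    printf("setattr (uid/gid/mode) on sink %d\n", sink);
    return 0;
}

static int
drop_sink_only_xattrs(int sink)
{
    printf("removexattr of stale keys on sink %d\n", sink);
    return 0;
}

static int
copy_source_xattrs(int sink)
{
    printf("setxattr of source keys on sink %d\n", sink);
    return 0;
}

/* Simplified ordering model of the metadata heal loop above. */
static void
metadata_heal(int child_count, unsigned char *healed_sinks)
{
    int i;

    for (i = 0; i < child_count; i++) {
        if (!healed_sinks[i])
            continue;
        if (copy_attrs_from_source(i) || drop_sink_only_xattrs(i) ||
            copy_source_xattrs(i))
            healed_sinks[i] = 0; /* failed sinks are no longer healed */
    }
}

int
main(void)
{
    unsigned char healed_sinks[] = { 0, 1, 1 };

    metadata_heal(3, healed_sinks);
    return 0;
}
```
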
static uint64_t
mtime_ns(struct iatt *ia)
{
- uint64_t ret;
+ uint64_t ret;
- ret = (((uint64_t)(ia->ia_mtime)) * 1000000000)
- + (uint64_t)(ia->ia_mtime_nsec);
+ ret = (((uint64_t)(ia->ia_mtime)) * 1000000000) +
+ (uint64_t)(ia->ia_mtime_nsec);
- return ret;
+ return ret;
}
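
mtime_ns() folds the second and nanosecond parts of the stat into a single 64-bit value so that two modification times compare with one integer comparison. A tiny self-contained example of the same arithmetic follows; struct stamp is a stand-in for the relevant iatt fields, and the timestamps are invented.

```
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the ia_mtime / ia_mtime_nsec pair of struct iatt. */
struct stamp {
    int64_t sec;
    uint32_t nsec;
};

/* Same arithmetic as mtime_ns(): seconds * 10^9 plus nanoseconds. */
static uint64_t
stamp_ns(const struct stamp *s)
{
    return (uint64_t)s->sec * 1000000000ULL + s->nsec;
}

int
main(void)
{
    struct stamp a = { 1700000000, 250000000 };
    struct stamp b = { 1700000000, 900000000 };

    printf("newer copy: %s\n", stamp_ns(b) > stamp_ns(a) ? "b" : "a");
    return 0;
}
```
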
/*
@@ -120,313 +122,425 @@ mtime_ns(struct iatt *ia)
* the source with the most recent modification date.
*/
static int
-afr_dirtime_splitbrain_source (call_frame_t *frame, xlator_t *this,
- struct afr_reply *replies,
- unsigned char *locked_on)
+afr_dirtime_splitbrain_source(call_frame_t *frame, xlator_t *this,
+ struct afr_reply *replies,
+ unsigned char *locked_on)
{
- afr_private_t *priv = NULL;
- int source = -1;
- struct iatt source_ia;
- struct iatt child_ia;
- uint64_t mtime = 0;
- int i;
- int ret = -1;
-
- priv = this->private;
+ afr_private_t *priv = NULL;
+ int source = -1;
+ struct iatt source_ia;
+ struct iatt child_ia;
+ uint64_t mtime = 0;
+ int i;
+ int ret = -1;
+
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!locked_on[i])
+ continue;
+
+ if (!replies[i].valid)
+ continue;
+
+ if (replies[i].op_ret != 0)
+ continue;
+
+ if (mtime_ns(&replies[i].poststat) <= mtime)
+ continue;
+
+ mtime = mtime_ns(&replies[i].poststat);
+ source = i;
+ }
+
+ if (source == -1)
+ goto out;
+
+ source_ia = replies[source].poststat;
+ if (source_ia.ia_type != IA_IFDIR)
+ goto out;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (i == source)
+ continue;
+
+ if (!replies[i].valid)
+ continue;
+
+ if (replies[i].op_ret != 0)
+ continue;
+
+ child_ia = replies[i].poststat;
+
+ if (!IA_EQUAL(source_ia, child_ia, gfid) ||
+ !IA_EQUAL(source_ia, child_ia, type) ||
+ !IA_EQUAL(source_ia, child_ia, prot) ||
+ !IA_EQUAL(source_ia, child_ia, uid) ||
+ !IA_EQUAL(source_ia, child_ia, gid) ||
+ !afr_xattrs_are_equal(replies[source].xdata, replies[i].xdata))
+ goto out;
+ }
+
+ /*
+ * Metadata split brain is just about [amc]time
+ * We return our source.
+ */
+ ret = source;
+out:
+ return ret;
+}
- for (i = 0; i < priv->child_count; i++) {
- if (!locked_on[i])
- continue;
+static int
+__afr_selfheal_metadata_mark_pending_xattrs(call_frame_t *frame, xlator_t *this,
+ inode_t *inode,
+ struct afr_reply *replies,
+ unsigned char *sources)
+{
+ int ret = 0;
+ int i = 0;
+ int m_idx = 0;
+ afr_private_t *priv = NULL;
+ int raw[AFR_NUM_CHANGE_LOGS] = {0};
+ dict_t *xattr = NULL;
+
+ priv = this->private;
+ m_idx = afr_index_for_transaction_type(AFR_METADATA_TRANSACTION);
+ raw[m_idx] = 1;
+
+ xattr = dict_new();
+ if (!xattr)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i])
+ continue;
+ ret = dict_set_static_bin(xattr, priv->pending_key[i], raw,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ if (ret) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ ret = afr_selfheal_post_op(frame, this, inode, i, xattr, NULL);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_INFO, -ret, AFR_MSG_SELF_HEAL_INFO,
+ "Failed to set pending metadata xattr on child %d for %s", i,
+ uuid_utoa(inode->gfid));
+ goto out;
+ }
+ }
- if (!replies[i].valid)
- continue;
+ afr_replies_wipe(replies, priv->child_count);
+ ret = afr_selfheal_unlocked_discover(frame, inode, inode->gfid, replies);
- if (replies[i].op_ret != 0)
- continue;
+out:
+ if (xattr)
+ dict_unref(xattr);
+ return ret;
+}
- if (mtime_ns(&replies[i].poststat) <= mtime)
- continue;
+/*
+ * Look for mismatching uid/gid or mode or user xattrs even if
+ * AFR xattrs don't say so, and pick one arbitrarily as winner. */
- mtime = mtime_ns(&replies[i].poststat);
- source = i;
+static int
+__afr_selfheal_metadata_finalize_source(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, unsigned char *sources,
+ unsigned char *sinks,
+ unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ unsigned char *locked_on,
+ struct afr_reply *replies)
+{
+ int i = 0;
+ afr_private_t *priv = NULL;
+ struct iatt srcstat = {
+ 0,
+ };
+ int source = -1;
+ int sources_count = 0;
+ int ret = 0;
+
+ priv = this->private;
+
+ sources_count = AFR_COUNT(sources, priv->child_count);
+
+ if ((AFR_CMP(locked_on, healed_sinks, priv->child_count) == 0) ||
+ !sources_count) {
+ source = afr_mark_split_brain_source_sinks(
+ frame, this, inode, sources, sinks, healed_sinks, locked_on,
+ replies, AFR_METADATA_TRANSACTION);
+ if (source >= 0) {
+ _afr_fav_child_reset_sink_xattrs(
+ frame, this, inode, source, healed_sinks, undid_pending,
+ AFR_METADATA_TRANSACTION, locked_on, replies);
+ goto out;
}
- if (source == -1)
- goto out;
+ /* If this is a directory mtime/ctime only split brain
+ use the most recent */
+ source = afr_dirtime_splitbrain_source(frame, this, replies, locked_on);
+ if (source != -1) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SPLIT_BRAIN,
+ "clear time "
+ "split brain on %s",
+ uuid_utoa(replies[source].poststat.ia_gfid));
+ sources[source] = 1;
+ healed_sinks[source] = 0;
+ goto out;
+ }
- source_ia = replies[source].poststat;
- if (source_ia.ia_type != IA_IFDIR)
- goto out;
+ if (!priv->metadata_splitbrain_forced_heal) {
+ gf_event(EVENT_AFR_SPLIT_BRAIN,
+ "client-pid=%d;"
+ "subvol=%s;"
+ "type=metadata;file=%s",
+ this->ctx->cmd_args.client_pid, this->name,
+ uuid_utoa(inode->gfid));
+ return -EIO;
+ }
+ /* Metadata split brain, select one subvol
+ arbitrarily */
for (i = 0; i < priv->child_count; i++) {
- if (i == source)
- continue;
-
- if (!replies[i].valid)
- continue;
-
- if (replies[i].op_ret != 0)
- continue;
+ if (locked_on[i] && healed_sinks[i]) {
+ sources[i] = 1;
+ healed_sinks[i] = 0;
+ break;
+ }
+ }
+ }
+
+ /* No split brain at this point. If we were called from
+ * afr_heal_splitbrain_file(), abort.*/
+ if (afr_dict_contains_heal_op(frame))
+ return -EIO;
+
+ source = afr_choose_source_by_policy(priv, sources,
+ AFR_METADATA_TRANSACTION);
+ srcstat = replies[source].poststat;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i] || i == source)
+ continue;
+ if (!IA_EQUAL(srcstat, replies[i].poststat, type) ||
+ !IA_EQUAL(srcstat, replies[i].poststat, uid) ||
+ !IA_EQUAL(srcstat, replies[i].poststat, gid) ||
+ !IA_EQUAL(srcstat, replies[i].poststat, prot)) {
+ gf_msg_debug(this->name, 0,
+ "%s: iatt mismatch "
+ "for source(%d) vs (%d)",
+ uuid_utoa(replies[source].poststat.ia_gfid), source,
+ i);
+ sources[i] = 0;
+ healed_sinks[i] = 1;
+ }
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i] || i == source)
+ continue;
+ if (!afr_xattrs_are_equal(replies[source].xdata, replies[i].xdata)) {
+ gf_msg_debug(this->name, 0,
+ "%s: xattr mismatch "
+ "for source(%d) vs (%d)",
+ uuid_utoa(replies[source].poststat.ia_gfid), source,
+ i);
+ sources[i] = 0;
+ healed_sinks[i] = 1;
+ }
+ }
+ if ((sources_count == priv->child_count) && (source > -1) &&
+ (AFR_COUNT(healed_sinks, priv->child_count) != 0)) {
+ ret = __afr_selfheal_metadata_mark_pending_xattrs(frame, this, inode,
+ replies, sources);
+ if (ret < 0)
+ return ret;
+ }
+out:
+ afr_mark_active_sinks(this, sources, locked_on, healed_sinks);
+ return source;
+}
- child_ia = replies[i].poststat;
+int
+__afr_selfheal_metadata_prepare(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, unsigned char *locked_on,
+ unsigned char *sources, unsigned char *sinks,
+ unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ struct afr_reply *replies, unsigned char *pflag)
+{
+ int ret = -1;
+ int source = -1;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ uint64_t *witness = NULL;
- if (!IA_EQUAL(source_ia, child_ia, gfid) ||
- !IA_EQUAL(source_ia, child_ia, type) ||
- !IA_EQUAL(source_ia, child_ia, prot) ||
- !IA_EQUAL(source_ia, child_ia, uid) ||
- !IA_EQUAL(source_ia, child_ia, gid) ||
- !afr_xattrs_are_equal (replies[source].xdata,
- replies[i].xdata))
- goto out;
- }
+ priv = this->private;
- /*
- * Metadata split brain is just about [amc]time
- * We return our source.
- */
- ret = source;
-out:
+ ret = afr_selfheal_unlocked_discover(frame, inode, inode->gfid, replies);
+ if (ret)
return ret;
-}
+ witness = alloca0(sizeof(*witness) * priv->child_count);
+ ret = afr_selfheal_find_direction(frame, this, replies,
+ AFR_METADATA_TRANSACTION, locked_on,
+ sources, sinks, witness, pflag);
+ if (ret)
+ return ret;
-/*
- * Look for mismatching uid/gid or mode or user xattrs even if
- * AFR xattrs don't say so, and pick one arbitrarily as winner. */
+ /* Initialize the healed_sinks[] array optimistically to
+ the intersection of to-be-healed (i.e sinks[]) and
+ the list of servers which are up (i.e locked_on[]).
+
+ As we encounter failures in the healing process, we
+ will unmark the respective servers in the healed_sinks[]
+ array.
+ */
+ AFR_INTERSECT(healed_sinks, sinks, locked_on, priv->child_count);
+
+ /* If any source has witness, pick first
+ * witness source and make everybody else sinks */
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i] && witness[i]) {
+ source = i;
+ break;
+ }
+ }
-static int
-__afr_selfheal_metadata_finalize_source (call_frame_t *frame, xlator_t *this,
- unsigned char *sources,
- unsigned char *sinks,
- unsigned char *healed_sinks,
- unsigned char *locked_on,
- struct afr_reply *replies)
-{
- int i = 0;
- afr_private_t *priv = NULL;
- struct iatt srcstat = {0, };
- int source = -1;
- int sources_count = 0;
-
- priv = this->private;
-
- sources_count = AFR_COUNT (sources, priv->child_count);
-
- if ((AFR_CMP (locked_on, healed_sinks, priv->child_count) == 0)
- || !sources_count) {
-
- source = afr_mark_split_brain_source_sinks (frame, this,
- sources, sinks,
- healed_sinks,
- locked_on, replies,
- AFR_METADATA_TRANSACTION);
- if (source >= 0)
- return source;
-
- /* If this is a directory mtime/ctime only split brain
- use the most recent */
- source = afr_dirtime_splitbrain_source (frame, this,
- replies, locked_on);
- if (source != -1) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_SPLIT_BRAIN, "clear time "
- "split brain on %s",
- uuid_utoa (replies[source].poststat.ia_gfid));
- sources[source] = 1;
- healed_sinks[source] = 0;
- return source;
- }
-
- if (!priv->metadata_splitbrain_forced_heal) {
- return -EIO;
- }
-
- /* Metadata split brain, select one subvol
- arbitrarily */
- for (i = 0; i < priv->child_count; i++) {
- if (locked_on[i] && healed_sinks[i]) {
- sources[i] = 1;
- healed_sinks[i] = 0;
- break;
- }
- }
- }
-
- /* No split brain at this point. If we were called from
- * afr_heal_splitbrain_file(), abort.*/
- if (afr_dict_contains_heal_op(frame))
- return -EIO;
-
- source = afr_choose_source_by_policy (priv, sources,
- AFR_METADATA_TRANSACTION);
- srcstat = replies[source].poststat;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i] || i == source)
- continue;
- if (!IA_EQUAL (srcstat, replies[i].poststat, type) ||
- !IA_EQUAL (srcstat, replies[i].poststat, uid) ||
- !IA_EQUAL (srcstat, replies[i].poststat, gid) ||
- !IA_EQUAL (srcstat, replies[i].poststat, prot)) {
- gf_msg_debug (this->name, 0, "%s: iatt mismatch "
- "for source(%d) vs (%d)",
- uuid_utoa
- (replies[source].poststat.ia_gfid),
- source, i);
- sources[i] = 0;
- healed_sinks[i] = 1;
- }
- }
-
- for (i =0; i < priv->child_count; i++) {
- if (!sources[i] || i == source)
- continue;
- if (!afr_xattrs_are_equal (replies[source].xdata,
- replies[i].xdata)) {
- gf_msg_debug (this->name, 0, "%s: xattr mismatch "
- "for source(%d) vs (%d)",
- uuid_utoa
- (replies[source].poststat.ia_gfid),
- source, i);
- sources[i] = 0;
- healed_sinks[i] = 1;
- }
+ if (source != -1) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (i != source && sources[i]) {
+ sources[i] = 0;
+ healed_sinks[i] = 1;
+ }
}
+ }
- return source;
-}
+ source = __afr_selfheal_metadata_finalize_source(
+ frame, this, inode, sources, sinks, healed_sinks, undid_pending,
+ locked_on, replies);
+ if (source < 0)
+ return -EIO;
+
+ return source;
+}
int
-__afr_selfheal_metadata_prepare (call_frame_t *frame, xlator_t *this, inode_t *inode,
- unsigned char *locked_on, unsigned char *sources,
- unsigned char *sinks, unsigned char *healed_sinks,
- struct afr_reply *replies, gf_boolean_t *pflag)
+afr_selfheal_metadata(call_frame_t *frame, xlator_t *this, inode_t *inode)
{
- int ret = -1;
- int source = -1;
- afr_private_t *priv = NULL;
- int i = 0;
- uint64_t *witness = NULL;
+ afr_private_t *priv = NULL;
+ int ret = -1;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ unsigned char *data_lock = NULL;
+ unsigned char *healed_sinks = NULL;
+ unsigned char *undid_pending = NULL;
+ struct afr_reply *locked_replies = NULL;
+ gf_boolean_t did_sh = _gf_true;
+ int source = -1;
+
+ priv = this->private;
+
+ sources = alloca0(priv->child_count);
+ sinks = alloca0(priv->child_count);
+ healed_sinks = alloca0(priv->child_count);
+ undid_pending = alloca0(priv->child_count);
+ data_lock = alloca0(priv->child_count);
+
+ locked_replies = alloca0(sizeof(*locked_replies) * priv->child_count);
+
+ ret = afr_selfheal_inodelk(frame, this, inode, this->name, LLONG_MAX - 1, 0,
+ data_lock);
+ {
+ if (ret < priv->child_count) {
+ ret = -ENOTCONN;
+ goto unlock;
+ }
- priv = this->private;
+ ret = __afr_selfheal_metadata_prepare(
+ frame, this, inode, data_lock, sources, sinks, healed_sinks,
+ undid_pending, locked_replies, NULL);
+ if (ret < 0)
+ goto unlock;
- ret = afr_selfheal_unlocked_discover (frame, inode, inode->gfid,
- replies);
- if (ret)
- return ret;
-
- witness = alloca0 (sizeof (*witness) * priv->child_count);
- ret = afr_selfheal_find_direction (frame, this, replies,
- AFR_METADATA_TRANSACTION,
- locked_on, sources, sinks, witness,
- pflag);
- if (ret)
- return ret;
-
- /* Initialize the healed_sinks[] array optimistically to
- the intersection of to-be-healed (i.e sinks[]) and
- the list of servers which are up (i.e locked_on[]).
-
- As we encounter failures in the healing process, we
- will unmark the respective servers in the healed_sinks[]
- array.
- */
- AFR_INTERSECT (healed_sinks, sinks, locked_on, priv->child_count);
-
- /* If any source has witness, pick first
- * witness source and make everybody else sinks */
- for (i = 0; i < priv->child_count; i++) {
- if (sources[i] && witness[i]) {
- source = i;
- break;
- }
- }
+ source = ret;
- if (source != -1) {
- for (i = 0; i < priv->child_count; i++) {
- if (i != source && sources[i]) {
- sources[i] = 0;
- healed_sinks[i] = 1;
- }
- }
+ if (AFR_COUNT(healed_sinks, priv->child_count) == 0) {
+ did_sh = _gf_false;
+ goto unlock;
}
- source = __afr_selfheal_metadata_finalize_source (frame, this, sources,
- sinks, healed_sinks,
- locked_on, replies);
+ ret = __afr_selfheal_metadata_do(frame, this, inode, source,
+ healed_sinks, locked_replies);
+ if (ret)
+ goto unlock;
- if (source < 0)
- return -EIO;
+ afr_selfheal_restore_time(frame, this, inode, source, healed_sinks,
+ locked_replies);
- return source;
+ ret = afr_selfheal_undo_pending(
+ frame, this, inode, sources, sinks, healed_sinks, undid_pending,
+ AFR_METADATA_TRANSACTION, locked_replies, data_lock);
+ }
+unlock:
+ afr_selfheal_uninodelk(frame, this, inode, this->name, LLONG_MAX - 1, 0,
+ data_lock);
+
+ if (did_sh)
+ afr_log_selfheal(inode->gfid, this, ret, "metadata", source, sources,
+ healed_sinks);
+ else
+ ret = 1;
+
+ if (locked_replies)
+ afr_replies_wipe(locked_replies, priv->child_count);
+ return ret;
}
int
-afr_selfheal_metadata (call_frame_t *frame, xlator_t *this, inode_t *inode)
+afr_selfheal_metadata_by_stbuf(xlator_t *this, struct iatt *stbuf)
{
- afr_private_t *priv = NULL;
- int ret = -1;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *data_lock = NULL;
- unsigned char *healed_sinks = NULL;
- struct afr_reply *locked_replies = NULL;
- gf_boolean_t did_sh = _gf_true;
- int source = -1;
-
- priv = this->private;
-
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
- data_lock = alloca0 (priv->child_count);
-
- locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count);
-
- ret = afr_selfheal_inodelk (frame, this, inode, this->name,
- LLONG_MAX - 1, 0, data_lock);
- {
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- ret = -ENOTCONN;
- goto unlock;
- }
-
- ret = __afr_selfheal_metadata_prepare (frame, this, inode,
- data_lock, sources,
- sinks, healed_sinks,
- locked_replies, NULL);
- if (ret < 0)
- goto unlock;
-
- source = ret;
-
- if (AFR_COUNT (healed_sinks, priv->child_count) == 0) {
- did_sh = _gf_false;
- goto unlock;
- }
-
- ret = __afr_selfheal_metadata_do (frame, this, inode, source,
- healed_sinks, locked_replies);
- if (ret)
- goto unlock;
-
- ret = afr_selfheal_undo_pending (frame, this, inode, sources,
- sinks, healed_sinks,
- AFR_METADATA_TRANSACTION,
- locked_replies, data_lock);
- }
-unlock:
- afr_selfheal_uninodelk (frame, this, inode, this->name,
- LLONG_MAX -1, 0, data_lock);
-
- if (did_sh)
- afr_log_selfheal (inode->gfid, this, ret, "metadata", source,
- healed_sinks);
- else
- ret = 1;
-
- if (locked_replies)
- afr_replies_wipe (locked_replies, priv->child_count);
- return ret;
+ inode_t *inode = NULL;
+ inode_t *link_inode = NULL;
+ call_frame_t *frame = NULL;
+ int ret = 0;
+
+ if (gf_uuid_is_null(stbuf->ia_gfid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ inode = inode_new(this->itable);
+ if (!inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ link_inode = inode_link(inode, NULL, NULL, stbuf);
+ if (!link_inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ frame = afr_frame_create(this, &ret);
+ if (!frame) {
+ ret = -ret;
+ goto out;
+ }
+
+ ret = afr_selfheal_metadata(frame, this, link_inode);
+out:
+ if (inode)
+ inode_unref(inode);
+ if (link_inode)
+ inode_unref(link_inode);
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ return ret;
}
diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
index 9f7a5b1ff0f..834aac86d48 100644
--- a/xlators/cluster/afr/src/afr-self-heal-name.c
+++ b/xlators/cluster/afr/src/afr-self-heal-name.c
@@ -8,712 +8,609 @@
cases as published by the Free Software Foundation.
*/
-
+#include <glusterfs/events.h>
#include "afr.h"
#include "afr-self-heal.h"
#include "afr-messages.h"
int
-__afr_selfheal_assign_gfid (xlator_t *this, inode_t *parent, uuid_t pargfid,
- const char *bname, inode_t *inode,
- struct afr_reply *replies, void *gfid,
- unsigned char *locked_on,
- gf_boolean_t is_gfid_absent)
+__afr_selfheal_assign_gfid(xlator_t *this, inode_t *parent, uuid_t pargfid,
+ const char *bname, inode_t *inode,
+ struct afr_reply *replies, void *gfid,
+ unsigned char *locked_on, int source,
+ unsigned char *sources, gf_boolean_t is_gfid_absent,
+ int *gfid_idx)
{
- int ret = 0;
- int up_count = 0;
- int locked_count = 0;
- afr_private_t *priv = NULL;
- dict_t *xdata = NULL;
- loc_t loc = {0, };
- call_frame_t *new_frame = NULL;
- afr_local_t *new_local = NULL;
-
- priv = this->private;
-
- new_frame = afr_frame_create (this);
- if (!new_frame) {
- ret = -ENOMEM;
- goto out;
- }
-
- new_local = new_frame->local;
-
- gf_uuid_copy (parent->gfid, pargfid);
+ int ret = 0;
+ int up_count = 0;
+ int locked_count = 0;
+ afr_private_t *priv = NULL;
- xdata = dict_new ();
- if (!xdata) {
- ret = -ENOMEM;
- goto out;
- }
+ priv = this->private;
- ret = dict_set_static_bin (xdata, "gfid-req", gfid, 16);
- if (ret) {
- ret = -ENOMEM;
- goto out;
- }
+ gf_uuid_copy(parent->gfid, pargfid);
- loc.parent = inode_ref (parent);
- loc.inode = inode_ref (inode);
- gf_uuid_copy (loc.pargfid, pargfid);
- loc.name = bname;
+ if (is_gfid_absent) {
+ /* Ensure all children of AFR are up before performing gfid heal, to
+ * guard against the possibility of gfid split brain. */
- if (is_gfid_absent) {
- /* Ensure all children of AFR are up before performing gfid heal, to
- * guard against the possibility of gfid split brain. */
-
- up_count = AFR_COUNT (priv->child_up, priv->child_count);
- if (up_count != priv->child_count) {
- ret = -EIO;
- goto out;
- }
-
- locked_count = AFR_COUNT (locked_on, priv->child_count);
- if (locked_count != priv->child_count) {
- ret = -EIO;
- goto out;
- }
+ up_count = AFR_COUNT(priv->child_up, priv->child_count);
+ if (up_count != priv->child_count) {
+ ret = -EIO;
+ goto out;
}
- /* Clear out old replies here and wind lookup on all locked
- * subvolumes to achieve two things:
- * a. gfid heal on those subvolumes that do not have gfid associated
- * with the inode, and
- * b. refresh replies, which can be consumed by
- * __afr_selfheal_name_impunge().
- */
-
- AFR_ONLIST (locked_on, new_frame, afr_selfheal_discover_cbk, lookup,
- &loc, xdata);
-
- afr_replies_wipe (replies, priv->child_count);
+ locked_count = AFR_COUNT(locked_on, priv->child_count);
+ if (locked_count != priv->child_count) {
+ ret = -EIO;
+ goto out;
+ }
+ }
- afr_replies_copy (replies, new_local->replies, priv->child_count);
+ ret = afr_lookup_and_heal_gfid(this, parent, bname, inode, replies, source,
+ sources, gfid, gfid_idx);
out:
- loc_wipe (&loc);
- if (xdata)
- dict_unref (xdata);
- if (new_frame)
- AFR_STACK_DESTROY (new_frame);
-
- return ret;
+ return ret;
}
int
-__afr_selfheal_name_impunge (call_frame_t *frame, xlator_t *this,
- inode_t *parent, uuid_t pargfid,
- const char *bname, inode_t *inode,
- struct afr_reply *replies, int gfid_idx)
+__afr_selfheal_name_impunge(call_frame_t *frame, xlator_t *this,
+ inode_t *parent, uuid_t pargfid, const char *bname,
+ inode_t *inode, struct afr_reply *replies,
+ int gfid_idx)
{
- int i = 0;
- afr_private_t *priv = NULL;
- int ret = 0;
- unsigned char *newentry = NULL;
- unsigned char *sources = NULL;
-
- priv = this->private;
-
- newentry = alloca0 (priv->child_count);
- sources = alloca0 (priv->child_count);
+ int i = 0;
+ afr_private_t *priv = NULL;
+ int ret = 0;
+ unsigned char *sources = NULL;
- gf_uuid_copy (parent->gfid, pargfid);
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
+ sources = alloca0(priv->child_count);
- if (gf_uuid_compare (replies[i].poststat.ia_gfid,
- replies[gfid_idx].poststat.ia_gfid) == 0) {
- sources[i] = 1;
- continue;
- }
+ gf_uuid_copy(parent->gfid, pargfid);
- ret |= afr_selfheal_recreate_entry (this, i, gfid_idx, parent,
- bname, inode, replies,
- newentry);
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret != 0)
+ continue;
- if (AFR_COUNT (newentry, priv->child_count))
- afr_selfheal_newentry_mark (frame, this, inode, gfid_idx, replies,
- sources, newentry);
- return ret;
-}
+ if (gf_uuid_compare(replies[i].poststat.ia_gfid,
+ replies[gfid_idx].poststat.ia_gfid) == 0) {
+ sources[i] = 1;
+ continue;
+ }
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i])
+ continue;
-int
-__afr_selfheal_name_expunge (xlator_t *this, inode_t *parent, uuid_t pargfid,
- const char *bname, inode_t *inode,
- struct afr_reply *replies)
-{
- loc_t loc = {0, };
- int i = 0;
- afr_private_t *priv = NULL;
- char g[64];
- int ret = 0;
-
- priv = this->private;
-
- loc.parent = inode_ref (parent);
- gf_uuid_copy (loc.pargfid, pargfid);
- loc.name = bname;
- loc.inode = inode_ref (inode);
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
-
- if (replies[i].op_ret)
- continue;
-
- switch (replies[i].poststat.ia_type) {
- case IA_IFDIR:
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_EXPUNGING_FILE_OR_DIR,
- "expunging dir %s/%s (%s) on %s",
- uuid_utoa (pargfid), bname,
- uuid_utoa_r (replies[i].poststat.ia_gfid, g),
- priv->children[i]->name);
-
- ret |= syncop_rmdir (priv->children[i], &loc, 1, NULL,
- NULL);
- break;
- default:
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_EXPUNGING_FILE_OR_DIR,
- "expunging file %s/%s (%s) on %s",
- uuid_utoa (pargfid), bname,
- uuid_utoa_r (replies[i].poststat.ia_gfid, g),
- priv->children[i]->name);
-
- ret |= syncop_unlink (priv->children[i], &loc, NULL,
- NULL);
- break;
- }
- }
-
- loc_wipe (&loc);
-
- return ret;
+ ret |= afr_selfheal_recreate_entry(frame, i, gfid_idx, sources, parent,
+ bname, inode, replies);
+ }
+ return ret;
}
-/* This function is to be called after ensuring that there is no gfid mismatch
- * for the inode across multiple sources
- */
-static int
-afr_selfheal_gfid_idx_get (xlator_t *this, struct afr_reply *replies,
- unsigned char *sources)
+int
+__afr_selfheal_name_expunge(xlator_t *this, inode_t *parent, uuid_t pargfid,
+ const char *bname, inode_t *inode,
+ struct afr_reply *replies)
{
- int i = 0;
- int gfid_idx = -1;
- afr_private_t *priv = NULL;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ int ret = 0;
- priv = this->private;
+ priv = this->private;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
- if (!sources[i])
- continue;
+ if (replies[i].op_ret)
+ continue;
- if (gf_uuid_is_null (replies[i].poststat.ia_gfid))
- continue;
+ ret |= afr_selfheal_entry_delete(this, parent, bname, inode, i,
+ replies);
+ }
- gfid_idx = i;
- break;
- }
- return gfid_idx;
+ return ret;
}
static gf_boolean_t
-afr_selfheal_name_need_heal_check (xlator_t *this, struct afr_reply *replies)
+afr_selfheal_name_need_heal_check(xlator_t *this, struct afr_reply *replies)
{
- int i = 0;
- int first_idx = -1;
- gf_boolean_t need_heal = _gf_false;
- afr_private_t *priv = NULL;
-
- priv = this->private;
+ int i = 0;
+ int first_idx = -1;
+ gf_boolean_t need_heal = _gf_false;
+ afr_private_t *priv = NULL;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
+ priv = this->private;
- if ((replies[i].op_ret == -1) &&
- (replies[i].op_errno == ENODATA))
- need_heal = _gf_true;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
- if (first_idx == -1) {
- first_idx = i;
- continue;
- }
+ if ((replies[i].op_ret == -1) && (replies[i].op_errno == ENODATA))
+ need_heal = _gf_true;
- if (replies[i].op_ret != replies[first_idx].op_ret)
- need_heal = _gf_true;
+ if (first_idx == -1) {
+ first_idx = i;
+ continue;
+ }
- if (gf_uuid_compare (replies[i].poststat.ia_gfid,
- replies[first_idx].poststat.ia_gfid))
- need_heal = _gf_true;
+ if (replies[i].op_ret != replies[first_idx].op_ret)
+ need_heal = _gf_true;
- if ((replies[i].op_ret == 0) &&
- (gf_uuid_is_null(replies[i].poststat.ia_gfid)))
- need_heal = _gf_true;
+ if (gf_uuid_compare(replies[i].poststat.ia_gfid,
+ replies[first_idx].poststat.ia_gfid))
+ need_heal = _gf_true;
- }
+ if ((replies[i].op_ret == 0) &&
+ (gf_uuid_is_null(replies[i].poststat.ia_gfid)))
+ need_heal = _gf_true;
+ }
- return need_heal;
+ return need_heal;
}
static int
-afr_selfheal_name_type_mismatch_check (xlator_t *this, struct afr_reply *replies,
- int source, unsigned char *sources,
- uuid_t pargfid, const char *bname)
+afr_selfheal_name_type_mismatch_check(xlator_t *this, struct afr_reply *replies,
+ int source, unsigned char *sources,
+ uuid_t pargfid, const char *bname)
{
- int i = 0;
- int type_idx = -1;
- ia_type_t inode_type = IA_INVAL;
- afr_private_t *priv = NULL;
-
- priv = this->private;
+ int i = 0;
+ int type_idx = -1;
+ ia_type_t inode_type = IA_INVAL;
+ ia_type_t inode_type1 = IA_INVAL;
+ afr_private_t *priv = NULL;
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
+ priv = this->private;
- if (replies[i].poststat.ia_type == IA_INVAL)
- continue;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret != 0)
+ continue;
- if (inode_type == IA_INVAL) {
- inode_type = replies[i].poststat.ia_type;
- type_idx = i;
- continue;
- }
+ if (replies[i].poststat.ia_type == IA_INVAL)
+ continue;
- if (sources[i] || source == -1) {
- if ((sources[type_idx] || source == -1) &&
- (inode_type != replies[i].poststat.ia_type)) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_SPLIT_BRAIN,
- "Type mismatch for <gfid:%s>/%s: "
- "%d on %s and %d on %s",
- uuid_utoa(pargfid), bname,
- replies[i].poststat.ia_type,
- priv->children[i]->name,
- replies[type_idx].poststat.ia_type,
- priv->children[type_idx]->name);
-
- return -EIO;
- }
- inode_type = replies[i].poststat.ia_type;
- type_idx = i;
- }
+ if (inode_type == IA_INVAL) {
+ inode_type = replies[i].poststat.ia_type;
+ type_idx = i;
+ continue;
}
- return 0;
+ inode_type1 = replies[i].poststat.ia_type;
+ if (sources[i] || source == -1) {
+ if ((sources[type_idx] || source == -1) &&
+ (inode_type != inode_type1)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN,
+ "Type mismatch for <gfid:%s>/%s: "
+ "%s on %s and %s on %s",
+ uuid_utoa(pargfid), bname,
+ gf_inode_type_to_str(inode_type1),
+ priv->children[i]->name,
+ gf_inode_type_to_str(inode_type),
+ priv->children[type_idx]->name);
+ gf_event(EVENT_AFR_SPLIT_BRAIN,
+ "client-pid=%d;"
+ "subvol=%s;type=file;"
+ "file=<gfid:%s>/%s;count=2;"
+ "child-%d=%s;type-%d=%s;child-%d=%s;"
+ "type-%d=%s",
+ this->ctx->cmd_args.client_pid, this->name,
+ uuid_utoa(pargfid), bname, i, priv->children[i]->name,
+ i, gf_inode_type_to_str(inode_type1), type_idx,
+ priv->children[type_idx]->name, type_idx,
+ gf_inode_type_to_str(inode_type));
+ return -EIO;
+ }
+ inode_type = replies[i].poststat.ia_type;
+ type_idx = i;
+ }
+ }
+ return 0;
}
static int
-afr_selfheal_name_gfid_mismatch_check (xlator_t *this, struct afr_reply *replies,
- int source, unsigned char *sources,
- int *gfid_idx, uuid_t pargfid,
- const char *bname)
+afr_selfheal_name_gfid_mismatch_check(xlator_t *this, struct afr_reply *replies,
+ int source, unsigned char *sources,
+ int *gfid_idx, uuid_t pargfid,
+ const char *bname, inode_t *inode,
+ unsigned char *locked_on, dict_t *xdata)
{
- int i = 0;
- int gfid_idx_iter = -1;
- void *gfid = NULL;
- afr_private_t *priv = NULL;
- char g1[64], g2[64];
-
- priv = this->private;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
-
- if (gf_uuid_is_null (replies[i].poststat.ia_gfid))
- continue;
-
- if (!gfid) {
- gfid = &replies[i].poststat.ia_gfid;
- gfid_idx_iter = i;
- continue;
- }
-
- if (sources[i] || source == -1) {
- if ((sources[gfid_idx_iter] || source == -1) &&
- gf_uuid_compare (gfid, replies[i].poststat.ia_gfid)) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_SPLIT_BRAIN,
- "GFID mismatch for <gfid:%s>/%s "
- "%s on %s and %s on %s",
- uuid_utoa (pargfid), bname,
- uuid_utoa_r (replies[i].poststat.ia_gfid, g1),
- priv->children[i]->name,
- uuid_utoa_r (replies[gfid_idx_iter].poststat.ia_gfid, g2),
- priv->children[gfid_idx_iter]->name);
-
- return -EIO;
- }
-
- gfid = &replies[i].poststat.ia_gfid;
- gfid_idx_iter = i;
- }
- }
-
- *gfid_idx = gfid_idx_iter;
- return 0;
+ int i = 0;
+ int gfid_idx_iter = -1;
+ int ret = -1;
+ void *gfid = NULL;
+ void *gfid1 = NULL;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid || replies[i].op_ret != 0)
+ continue;
+
+ if (gf_uuid_is_null(replies[i].poststat.ia_gfid))
+ continue;
+
+ if (!gfid) {
+ gfid = &replies[i].poststat.ia_gfid;
+ gfid_idx_iter = i;
+ continue;
+ }
+
+ gfid1 = &replies[i].poststat.ia_gfid;
+ if (sources[i] || source == -1) {
+ if ((sources[gfid_idx_iter] || source == -1) &&
+ gf_uuid_compare(gfid, gfid1)) {
+ ret = afr_gfid_split_brain_source(this, replies, inode, pargfid,
+ bname, gfid_idx_iter, i,
+ locked_on, gfid_idx, xdata);
+ if (!ret && *gfid_idx >= 0) {
+ ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ "GFID split-brain resolved");
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ AFR_MSG_DICT_SET_FAILED,
+ "Error setting gfid-"
+ "heal-msg dict");
+ }
+ return ret;
+ }
+ gfid = &replies[i].poststat.ia_gfid;
+ gfid_idx_iter = i;
+ }
+ }
+
+ *gfid_idx = gfid_idx_iter;
+ return 0;
}
static gf_boolean_t
-afr_selfheal_name_source_empty_check (xlator_t *this, struct afr_reply *replies,
- unsigned char *sources, int source)
+afr_selfheal_name_source_empty_check(xlator_t *this, struct afr_reply *replies,
+ unsigned char *sources, int source)
{
- int i = 0;
- afr_private_t *priv = NULL;
- gf_boolean_t source_is_empty = _gf_true;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ gf_boolean_t source_is_empty = _gf_true;
- priv = this->private;
+ priv = this->private;
- if (source == -1) {
- source_is_empty = _gf_false;
- goto out;
- }
+ if (source == -1) {
+ source_is_empty = _gf_false;
+ goto out;
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (!sources[i])
- continue;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
- if (replies[i].op_ret == -1 && replies[i].op_errno == ENOENT)
- continue;
+ if (replies[i].op_ret == -1 && replies[i].op_errno == ENOENT)
+ continue;
- source_is_empty = _gf_false;
- break;
- }
+ source_is_empty = _gf_false;
+ break;
+ }
out:
- return source_is_empty;
+ return source_is_empty;
}
int
-__afr_selfheal_name_do (call_frame_t *frame, xlator_t *this, inode_t *parent,
- uuid_t pargfid, const char *bname, inode_t *inode,
- unsigned char *sources, unsigned char *sinks,
- unsigned char *healed_sinks, int source,
- unsigned char *locked_on, struct afr_reply *replies,
- void *gfid_req)
+__afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
+ uuid_t pargfid, const char *bname, inode_t *inode,
+ unsigned char *sources, unsigned char *sinks,
+ unsigned char *healed_sinks, int source,
+ unsigned char *locked_on, struct afr_reply *replies,
+ void *gfid_req, dict_t *xdata)
{
- int gfid_idx = -1;
- int ret = -1;
- void *gfid = NULL;
- gf_boolean_t source_is_empty = _gf_true;
- gf_boolean_t need_heal = _gf_false;
- gf_boolean_t is_gfid_absent = _gf_false;
-
- need_heal = afr_selfheal_name_need_heal_check (this, replies);
- if (!need_heal)
- return 0;
-
- source_is_empty = afr_selfheal_name_source_empty_check (this, replies,
- sources,
- source);
- if (source_is_empty) {
- ret = __afr_selfheal_name_expunge (this, parent, pargfid,
- bname, inode, replies);
- if (ret == -EIO)
- ret = -1;
- return ret;
- }
-
- ret = afr_selfheal_name_type_mismatch_check (this, replies, source,
- sources, pargfid, bname);
- if (ret)
- return ret;
+ int gfid_idx = -1;
+ int ret = -1;
+ void *gfid = NULL;
+ gf_boolean_t source_is_empty = _gf_true;
+ gf_boolean_t need_heal = _gf_false;
+ gf_boolean_t is_gfid_absent = _gf_false;
+
+ need_heal = afr_selfheal_name_need_heal_check(this, replies);
+ if (!need_heal)
+ return 0;
- ret = afr_selfheal_name_gfid_mismatch_check (this, replies, source,
- sources, &gfid_idx,
- pargfid, bname);
- if (ret)
- return ret;
+ source_is_empty = afr_selfheal_name_source_empty_check(this, replies,
+ sources, source);
+ if (source_is_empty) {
+ ret = __afr_selfheal_name_expunge(this, parent, pargfid, bname, inode,
+ replies);
+ if (ret == -EIO)
+ ret = -1;
+ return ret;
+ }
- if (gfid_idx == -1) {
- if (!gfid_req || gf_uuid_is_null (gfid_req))
- return -1;
- gfid = gfid_req;
- } else {
- gfid = &replies[gfid_idx].poststat.ia_gfid;
- }
+ ret = afr_selfheal_name_type_mismatch_check(this, replies, source, sources,
+ pargfid, bname);
+ if (ret)
+ return ret;
- is_gfid_absent = (gfid_idx == -1) ? _gf_true : _gf_false;
- ret = __afr_selfheal_assign_gfid (this, parent, pargfid, bname, inode,
- replies, gfid, locked_on,
- is_gfid_absent);
- if (ret)
- return ret;
+ ret = afr_selfheal_name_gfid_mismatch_check(this, replies, source, sources,
+ &gfid_idx, pargfid, bname,
+ inode, locked_on, xdata);
+ if (ret)
+ return ret;
- if (gfid_idx == -1) {
- gfid_idx = afr_selfheal_gfid_idx_get (this, replies, sources);
- if (gfid_idx == -1)
- return -1;
- }
+ if (gfid_idx == -1) {
+ if (!gfid_req || gf_uuid_is_null(gfid_req))
+ return -1;
+ gfid = gfid_req;
+ } else {
+ gfid = &replies[gfid_idx].poststat.ia_gfid;
+ if (source == -1)
+ /* Either entry split-brain or dirty xattrs are present on parent.*/
+ source = gfid_idx;
+ }
+
+ is_gfid_absent = (gfid_idx == -1) ? _gf_true : _gf_false;
+ ret = __afr_selfheal_assign_gfid(this, parent, pargfid, bname, inode,
+ replies, gfid, locked_on, source, sources,
+ is_gfid_absent, &gfid_idx);
+ if (ret || (gfid_idx < 0))
+ return ret;
- ret = __afr_selfheal_name_impunge (frame, this, parent, pargfid,
- bname, inode,
- replies, gfid_idx);
- if (ret == -EIO)
- ret = -1;
+ ret = __afr_selfheal_name_impunge(frame, this, parent, pargfid, bname,
+ inode, replies, gfid_idx);
+ if (ret == -EIO)
+ ret = -1;
- return ret;
+ return ret;
}
-
int
-__afr_selfheal_name_finalize_source (xlator_t *this, unsigned char *sources,
- unsigned char *healed_sinks,
- unsigned char *locked_on,
- struct afr_reply *replies,
- uint64_t *witness)
+__afr_selfheal_name_finalize_source(xlator_t *this, unsigned char *sources,
+ unsigned char *healed_sinks,
+ unsigned char *locked_on, uint64_t *witness)
{
- int i = 0;
- afr_private_t *priv = NULL;
- int source = -1;
- int sources_count = 0;
-
- priv = this->private;
-
- sources_count = AFR_COUNT (sources, priv->child_count);
-
- if ((AFR_CMP (locked_on, healed_sinks, priv->child_count) == 0)
- || !sources_count || afr_does_witness_exist (this, witness)) {
- memset (sources, 0, sizeof (*sources) * priv->child_count);
- afr_mark_active_sinks (this, sources, locked_on, healed_sinks);
- return -1;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (sources[i]) {
- source = i;
- break;
- }
- }
-
- return source;
+ int i = 0;
+ afr_private_t *priv = NULL;
+ int source = -1;
+ int sources_count = 0;
+
+ priv = this->private;
+
+ sources_count = AFR_COUNT(sources, priv->child_count);
+
+ if ((AFR_CMP(locked_on, healed_sinks, priv->child_count) == 0) ||
+ !sources_count || afr_does_witness_exist(this, witness)) {
+ memset(sources, 0, sizeof(*sources) * priv->child_count);
+ afr_mark_active_sinks(this, sources, locked_on, healed_sinks);
+ return -1;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i]) {
+ source = i;
+ break;
+ }
+ }
+
+ return source;
}
int
-__afr_selfheal_name_prepare (call_frame_t *frame, xlator_t *this, inode_t *parent,
- uuid_t pargfid, unsigned char *locked_on,
- unsigned char *sources, unsigned char *sinks,
- unsigned char *healed_sinks, int *source_p)
+__afr_selfheal_name_prepare(call_frame_t *frame, xlator_t *this,
+ inode_t *parent, uuid_t pargfid,
+ unsigned char *locked_on, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks,
+ int *source_p)
{
- int ret = -1;
- int source = -1;
- afr_private_t *priv = NULL;
- struct afr_reply *replies = NULL;
- uint64_t *witness = NULL;
-
- priv = this->private;
-
- replies = alloca0 (priv->child_count * sizeof(*replies));
-
- ret = afr_selfheal_unlocked_discover (frame, parent, pargfid, replies);
- if (ret)
- goto out;
-
- witness = alloca0 (sizeof (*witness) * priv->child_count);
- ret = afr_selfheal_find_direction (frame, this, replies,
- AFR_ENTRY_TRANSACTION,
- locked_on, sources, sinks, witness,
- NULL);
- if (ret)
- goto out;
-
- /* Initialize the healed_sinks[] array optimistically to
- the intersection of to-be-healed (i.e sinks[]) and
- the list of servers which are up (i.e locked_on[]).
-
- As we encounter failures in the healing process, we
- will unmark the respective servers in the healed_sinks[]
- array.
- */
- AFR_INTERSECT (healed_sinks, sinks, locked_on, priv->child_count);
-
- source = __afr_selfheal_name_finalize_source (this, sources,
- healed_sinks,
- locked_on, replies,
- witness);
- if (source < 0) {
- /* If source is < 0 (typically split-brain), we perform a
- conservative merge of entries rather than erroring out */
- }
- *source_p = source;
+ int ret = -1;
+ int source = -1;
+ afr_private_t *priv = NULL;
+ struct afr_reply *replies = NULL;
+ uint64_t *witness = NULL;
+
+ priv = this->private;
+
+ replies = alloca0(priv->child_count * sizeof(*replies));
+
+ ret = afr_selfheal_unlocked_discover(frame, parent, pargfid, replies);
+ if (ret)
+ goto out;
+
+ witness = alloca0(sizeof(*witness) * priv->child_count);
+ ret = afr_selfheal_find_direction(frame, this, replies,
+ AFR_ENTRY_TRANSACTION, locked_on, sources,
+ sinks, witness, NULL);
+ if (ret)
+ goto out;
+
+ /* Initialize the healed_sinks[] array optimistically to
+ the intersection of to-be-healed (i.e sinks[]) and
+ the list of servers which are up (i.e locked_on[]).
+
+ As we encounter failures in the healing process, we
+ will unmark the respective servers in the healed_sinks[]
+ array.
+ */
+ AFR_INTERSECT(healed_sinks, sinks, locked_on, priv->child_count);
+
+ source = __afr_selfheal_name_finalize_source(this, sources, healed_sinks,
+ locked_on, witness);
+ if (source < 0) {
+ /* If source is < 0 (typically split-brain), we perform a
+ conservative merge of entries rather than erroring out */
+ }
+ *source_p = source;
out:
- if (replies)
- afr_replies_wipe (replies, priv->child_count);
+ if (replies)
+ afr_replies_wipe(replies, priv->child_count);
- return ret;
+ return ret;
}
-
int
-afr_selfheal_name_do (call_frame_t *frame, xlator_t *this, inode_t *parent,
- uuid_t pargfid, const char *bname, void *gfid_req)
+afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
+ uuid_t pargfid, const char *bname, void *gfid_req,
+ dict_t *xdata)
{
- afr_private_t *priv = NULL;
- unsigned char *sources = NULL;
- unsigned char *sinks = NULL;
- unsigned char *healed_sinks = NULL;
- unsigned char *locked_on = NULL;
- int source = -1;
- struct afr_reply *replies = NULL;
- int ret = -1;
- inode_t *inode = NULL;
- dict_t *xattr = NULL;
-
- xattr = dict_new ();
- if (!xattr)
- return -ENOMEM;
-
- ret = dict_set_int32 (xattr, GF_GFIDLESS_LOOKUP, 1);
- if (ret) {
- dict_destroy (xattr);
- return -1;
+ afr_private_t *priv = NULL;
+ unsigned char *sources = NULL;
+ unsigned char *sinks = NULL;
+ unsigned char *healed_sinks = NULL;
+ unsigned char *locked_on = NULL;
+ int source = -1;
+ struct afr_reply *replies = NULL;
+ int ret = -1;
+ inode_t *inode = NULL;
+ dict_t *xattr = NULL;
+
+ xattr = dict_new();
+ if (!xattr)
+ return -ENOMEM;
+
+ ret = dict_set_int32_sizen(xattr, GF_GFIDLESS_LOOKUP, 1);
+ if (ret) {
+ dict_unref(xattr);
+ return -1;
+ }
+
+ priv = this->private;
+
+ locked_on = alloca0(priv->child_count);
+ sources = alloca0(priv->child_count);
+ sinks = alloca0(priv->child_count);
+ healed_sinks = alloca0(priv->child_count);
+
+ replies = alloca0(priv->child_count * sizeof(*replies));
+
+ ret = afr_selfheal_entrylk(frame, this, parent, this->name, bname,
+ locked_on);
+ {
+ if (ret < priv->child_count) {
+ ret = -ENOTCONN;
+ goto unlock;
+ }
+
+ ret = __afr_selfheal_name_prepare(frame, this, parent, pargfid,
+ locked_on, sources, sinks,
+ healed_sinks, &source);
+ if (ret)
+ goto unlock;
+
+ inode = afr_selfheal_unlocked_lookup_on(frame, parent, bname, replies,
+ locked_on, xattr);
+ if (!inode) {
+ ret = -ENOMEM;
+ goto unlock;
}
- priv = this->private;
-
- locked_on = alloca0 (priv->child_count);
- sources = alloca0 (priv->child_count);
- sinks = alloca0 (priv->child_count);
- healed_sinks = alloca0 (priv->child_count);
-
- replies = alloca0 (priv->child_count * sizeof(*replies));
-
- ret = afr_selfheal_entrylk (frame, this, parent, this->name, bname,
- locked_on);
- {
- if (ret < AFR_SH_MIN_PARTICIPANTS) {
- ret = -ENOTCONN;
- goto unlock;
- }
-
- ret = __afr_selfheal_name_prepare (frame, this, parent, pargfid,
- locked_on, sources, sinks,
- healed_sinks, &source);
- if (ret)
- goto unlock;
-
- inode = afr_selfheal_unlocked_lookup_on (frame, parent, bname,
- replies, locked_on,
- xattr);
- if (!inode) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = __afr_selfheal_name_do (frame, this, parent, pargfid,
- bname, inode, sources, sinks,
- healed_sinks, source, locked_on,
- replies, gfid_req);
- }
+ ret = __afr_selfheal_name_do(frame, this, parent, pargfid, bname, inode,
+ sources, sinks, healed_sinks, source,
+ locked_on, replies, gfid_req, xdata);
+ }
unlock:
- afr_selfheal_unentrylk (frame, this, parent, this->name, bname,
- locked_on);
- if (inode)
- inode_unref (inode);
+ afr_selfheal_unentrylk(frame, this, parent, this->name, bname, locked_on,
+ NULL);
+ if (inode)
+ inode_unref(inode);
- if (replies)
- afr_replies_wipe (replies, priv->child_count);
- if (xattr)
- dict_unref (xattr);
+ if (replies)
+ afr_replies_wipe(replies, priv->child_count);
+ if (xattr)
+ dict_unref(xattr);
- return ret;
+ return ret;
}
-
int
-afr_selfheal_name_unlocked_inspect (call_frame_t *frame, xlator_t *this,
- inode_t *parent, uuid_t pargfid,
- const char *bname, gf_boolean_t *need_heal)
+afr_selfheal_name_unlocked_inspect(call_frame_t *frame, xlator_t *this,
+ inode_t *parent, uuid_t pargfid,
+ const char *bname, gf_boolean_t *need_heal)
{
- afr_private_t *priv = NULL;
- int i = 0;
- struct afr_reply *replies = NULL;
- inode_t *inode = NULL;
- int first_idx = -1;
-
- priv = this->private;
-
- replies = alloca0 (sizeof (*replies) * priv->child_count);
-
- inode = afr_selfheal_unlocked_lookup_on (frame, parent, bname,
- replies, priv->child_up, NULL);
- if (!inode)
- return -ENOMEM;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!replies[i].valid)
- continue;
-
- if ((replies[i].op_ret == -1) &&
- (replies[i].op_errno == ENODATA))
- *need_heal = _gf_true;
-
- if (first_idx == -1) {
- first_idx = i;
- continue;
- }
-
- if (replies[i].op_ret != replies[first_idx].op_ret)
- *need_heal = _gf_true;
-
- if (gf_uuid_compare (replies[i].poststat.ia_gfid,
- replies[first_idx].poststat.ia_gfid))
- *need_heal = _gf_true;
- }
-
- if (inode)
- inode_unref (inode);
- if (replies)
- afr_replies_wipe (replies, priv->child_count);
- return 0;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ struct afr_reply *replies = NULL;
+ inode_t *inode = NULL;
+ int first_idx = -1;
+ afr_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ replies = alloca0(sizeof(*replies) * priv->child_count);
+
+ inode = afr_selfheal_unlocked_lookup_on(frame, parent, bname, replies,
+ local->child_up, NULL);
+ if (!inode)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!replies[i].valid)
+ continue;
+
+ if ((replies[i].op_ret == -1) && (replies[i].op_errno == ENODATA)) {
+ *need_heal = _gf_true;
+ break;
+ }
+
+ if (first_idx == -1) {
+ first_idx = i;
+ continue;
+ }
+
+ if (replies[i].op_ret != replies[first_idx].op_ret) {
+ *need_heal = _gf_true;
+ break;
+ }
+
+ if (gf_uuid_compare(replies[i].poststat.ia_gfid,
+ replies[first_idx].poststat.ia_gfid)) {
+ *need_heal = _gf_true;
+ break;
+ }
+ }
+
+ if (inode)
+ inode_unref(inode);
+ if (replies)
+ afr_replies_wipe(replies, priv->child_count);
+ return 0;
}
int
-afr_selfheal_name (xlator_t *this, uuid_t pargfid, const char *bname,
- void *gfid_req)
+afr_selfheal_name(xlator_t *this, uuid_t pargfid, const char *bname,
+ void *gfid_req, dict_t *xdata)
{
- inode_t *parent = NULL;
- call_frame_t *frame = NULL;
- int ret = -1;
- gf_boolean_t need_heal = _gf_false;
-
- parent = afr_inode_find (this, pargfid);
- if (!parent)
- goto out;
-
- frame = afr_frame_create (this);
- if (!frame)
- goto out;
-
- ret = afr_selfheal_name_unlocked_inspect (frame, this, parent, pargfid,
- bname, &need_heal);
- if (ret)
- goto out;
-
- if (need_heal) {
- ret = afr_selfheal_name_do (frame, this, parent, pargfid, bname,
- gfid_req);
- if (ret)
- goto out;
- }
+ inode_t *parent = NULL;
+ call_frame_t *frame = NULL;
+ int ret = -1;
+ gf_boolean_t need_heal = _gf_false;
+
+ parent = afr_inode_find(this, pargfid);
+ if (!parent)
+ goto out;
+
+ frame = afr_frame_create(this, NULL);
+ if (!frame)
+ goto out;
+
+ ret = afr_selfheal_name_unlocked_inspect(frame, this, parent, pargfid,
+ bname, &need_heal);
+ if (ret)
+ goto out;
+
+ if (need_heal) {
+ ret = afr_selfheal_name_do(frame, this, parent, pargfid, bname,
+ gfid_req, xdata);
+ if (ret)
+ goto out;
+ }
- ret = 0;
+ ret = 0;
out:
- if (parent)
- inode_unref (parent);
- if (frame)
- AFR_STACK_DESTROY (frame);
+ if (parent)
+ inode_unref(parent);
+ if (frame)
+ AFR_STACK_DESTROY(frame);
- return ret;
+ return ret;
}
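
The name self-heal changes above thread a `dict_t *xdata` argument through `afr_selfheal_name()`, `afr_selfheal_name_do()` and `__afr_selfheal_name_do()`. A GFID mismatch between sources is no longer returned straight away as `-EIO`: `afr_selfheal_name_gfid_mismatch_check()` now hands the conflict to `afr_gfid_split_brain_source()` and, when a source can be chosen, records "GFID split-brain resolved" under the `gfid-heal-msg` key of `xdata` so the caller can report it. The expunge path delegates the per-brick deletion to `afr_selfheal_entry_delete()`, and the entry-lock participant check compares against `priv->child_count` rather than the removed `AFR_SH_MIN_PARTICIPANTS`. The sketch below shows how a caller might use the new signature; it is illustrative only (not part of the patch), assumes an xlator context plus the stock libglusterfs dict helpers (`dict_new()`, `dict_get_str()`, `dict_unref()`), and `example_heal_name_and_report()` is an invented name.

```c
/* Illustrative sketch only -- not part of the patch above. Assumes an
 * xlator context where `this`, `pargfid` and `bname` are available, plus
 * the usual libglusterfs dict helpers. */
static void
example_heal_name_and_report(xlator_t *this, uuid_t pargfid, const char *bname)
{
    dict_t *xdata = dict_new();
    char *heal_msg = NULL;

    if (!xdata)
        return;

    /* New signature: gfid_req may be NULL; xdata travels down to the GFID
     * mismatch check, which may set "gfid-heal-msg" on success. */
    if (afr_selfheal_name(this, pargfid, bname, NULL, xdata) == 0 &&
        dict_get_str(xdata, "gfid-heal-msg", &heal_msg) == 0)
        gf_msg_debug(this->name, 0, "%s/%s: %s", uuid_utoa(pargfid), bname,
                     heal_msg);

    dict_unref(xdata);
}
```
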
diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
index 0bee7a023ad..48e6dbcfb18 100644
--- a/xlators/cluster/afr/src/afr-self-heal.h
+++ b/xlators/cluster/afr/src/afr-self-heal.h
@@ -8,262 +8,370 @@
cases as published by the Free Software Foundation.
*/
-
#ifndef _AFR_SELFHEAL_H
#define _AFR_SELFHEAL_H
-#define AFR_SH_MIN_PARTICIPANTS 2
-
/* Perform fop on all UP subvolumes and wait for all callbacks to return */
-#define AFR_ONALL(frame, rfn, fop, args ...) do { \
- afr_local_t *__local = frame->local; \
- afr_private_t *__priv = frame->this->private; \
- int __i = 0, __count = 0; \
- \
- afr_local_replies_wipe (__local, __priv); \
- \
- for (__i = 0; __i < __priv->child_count; __i++) { \
- if (!__priv->child_up[__i]) continue; \
- STACK_WIND_COOKIE (frame, rfn, (void *)(long) __i, \
- __priv->children[__i], \
- __priv->children[__i]->fops->fop, args); \
- __count++; \
- } \
- syncbarrier_wait (&__local->barrier, __count); \
- } while (0)
-
+#define AFR_ONALL(frame, rfn, fop, args...) \
+ do { \
+ afr_local_t *__local = frame->local; \
+ afr_private_t *__priv = frame->this->private; \
+ int __i = 0, __count = 0; \
+ unsigned char *__child_up = alloca(__priv->child_count); \
+ \
+ memcpy(__child_up, __priv->child_up, \
+ sizeof(*__child_up) * __priv->child_count); \
+ __count = AFR_COUNT(__child_up, __priv->child_count); \
+ \
+ __local->barrier.waitfor = __count; \
+ afr_local_replies_wipe(__local, __priv); \
+ \
+ for (__i = 0; __i < __priv->child_count; __i++) { \
+ if (!__child_up[__i]) \
+ continue; \
+ STACK_WIND_COOKIE(frame, rfn, (void *)(long)__i, \
+ __priv->children[__i], \
+ __priv->children[__i]->fops->fop, args); \
+ } \
+ syncbarrier_wait(&__local->barrier, __count); \
+ } while (0)
/* Perform fop on all subvolumes represented by list[] array and wait
for all callbacks to return */
-#define AFR_ONLIST(list, frame, rfn, fop, args ...) do { \
- afr_local_t *__local = frame->local; \
- afr_private_t *__priv = frame->this->private; \
- int __i = 0, __count = 0; \
- \
- afr_local_replies_wipe (__local, __priv); \
- \
- for (__i = 0; __i < __priv->child_count; __i++) { \
- if (!list[__i]) continue; \
- STACK_WIND_COOKIE (frame, rfn, (void *)(long) __i, \
- __priv->children[__i], \
- __priv->children[__i]->fops->fop, args); \
- __count++; \
- } \
- syncbarrier_wait (&__local->barrier, __count); \
- } while (0)
-
-
-#define AFR_SEQ(frame, rfn, fop, args ...) do { \
- afr_local_t *__local = frame->local; \
- afr_private_t *__priv = frame->this->private; \
- int __i = 0; \
- \
- afr_local_replies_wipe (__local, __priv); \
- \
- for (__i = 0; __i < __priv->child_count; __i++) { \
- if (!__priv->child_up[__i]) continue; \
- STACK_WIND_COOKIE (frame, rfn, (void *)(long) __i, \
- __priv->children[__i], \
- __priv->children[__i]->fops->fop, args); \
- syncbarrier_wait (&__local->barrier, 1); \
- } \
- } while (0)
-
-
-#define ALLOC_MATRIX(n, type) ({type **__ptr = NULL; \
- int __i; \
- __ptr = alloca0 (n * sizeof(type *)); \
- for (__i = 0; __i < n; __i++) __ptr[__i] = alloca0 (n * sizeof(type)); \
- __ptr;})
-
-
-#define IA_EQUAL(f,s,field) (memcmp (&(f.ia_##field), &(s.ia_##field), sizeof (s.ia_##field)) == 0)
-
-
-int
-afr_selfheal (xlator_t *this, uuid_t gfid);
+#define AFR_ONLIST(list, frame, rfn, fop, args...) \
+ do { \
+ afr_local_t *__local = frame->local; \
+ afr_private_t *__priv = frame->this->private; \
+ int __i = 0; \
+ int __count = 0; \
+ unsigned char *__list = alloca(__priv->child_count); \
+ \
+ memcpy(__list, list, sizeof(*__list) * __priv->child_count); \
+ __count = AFR_COUNT(__list, __priv->child_count); \
+ __local->barrier.waitfor = __count; \
+ afr_local_replies_wipe(__local, __priv); \
+ \
+ for (__i = 0; __i < __priv->child_count; __i++) { \
+ if (!__list[__i]) \
+ continue; \
+ STACK_WIND_COOKIE(frame, rfn, (void *)(long)__i, \
+ __priv->children[__i], \
+ __priv->children[__i]->fops->fop, args); \
+ } \
+ syncbarrier_wait(&__local->barrier, __count); \
+ } while (0)
+
+#define AFR_SEQ(frame, rfn, fop, args...) \
+ do { \
+ afr_local_t *__local = frame->local; \
+ afr_private_t *__priv = frame->this->private; \
+ int __i = 0; \
+ \
+ afr_local_replies_wipe(__local, __priv); \
+ \
+ for (__i = 0; __i < __priv->child_count; __i++) { \
+ if (!__priv->child_up[__i]) \
+ continue; \
+ STACK_WIND_COOKIE(frame, rfn, (void *)(long)__i, \
+ __priv->children[__i], \
+ __priv->children[__i]->fops->fop, args); \
+ syncbarrier_wait(&__local->barrier, 1); \
+ } \
+ } while (0)
+
+#define ALLOC_MATRIX(n, type) \
+ ({ \
+ int __i; \
+ type **__ptr = alloca(n * sizeof(type *)); \
+ \
+ for (__i = 0; __i < n; __i++) \
+ __ptr[__i] = alloca0(n * sizeof(type)); \
+ __ptr; \
+ })
+
+#define IA_EQUAL(f, s, field) \
+ (memcmp(&(f.ia_##field), &(s.ia_##field), sizeof(s.ia_##field)) == 0)
+
+#define SBRAIN_HEAL_NO_GO_MSG \
+ "Failed to obtain replies from all bricks of " \
+ "the replica (are they up?). Cannot resolve split-brain."
+#define SFILE_NOT_IN_SPLIT_BRAIN "File not in split-brain"
+#define SNO_BIGGER_FILE "No bigger file"
+#define SNO_DIFF_IN_MTIME "No difference in mtime"
+#define SUSE_SOURCE_BRICK_TO_HEAL \
+ "Use source-brick option to heal metadata" \
+ " split-brain"
+#define SINVALID_BRICK_NAME "Invalid brick name"
+#define SBRICK_IS_NOT_UP "Brick is not up"
+#define SBRICK_NOT_CONNECTED "Brick is not connected"
+#define SLESS_THAN2_BRICKS_in_REP "< 2 bricks in replica are up"
+#define SBRICK_IS_REMOTE "Brick is remote"
+#define SSTARTED_SELF_HEAL "Started self-heal"
+#define SOP_NOT_SUPPORTED "Operation Not Supported"
+#define SFILE_NOT_UNDER_DATA \
+ "The file is not under data or metadata " \
+ "split-brain"
+#define SFILE_NOT_IN_SPLIT_BRAIN "File not in split-brain"
+#define SALL_BRICKS_UP_TO_RESOLVE \
+ "All the bricks should be up to resolve the" \
+ " gfid split brain"
+#define SERROR_GETTING_SRC_BRICK "Error getting the source brick"
+int
+afr_selfheal(xlator_t *this, uuid_t gfid);
-void
-afr_throttled_selfheal (call_frame_t *frame, xlator_t *this);
+gf_boolean_t
+afr_throttled_selfheal(call_frame_t *frame, xlator_t *this);
+
+int
+afr_selfheal_name(xlator_t *this, uuid_t gfid, const char *name, void *gfid_req,
+ dict_t *xdata);
+
+int
+afr_selfheal_data(call_frame_t *frame, xlator_t *this, fd_t *fd);
int
-afr_selfheal_name (xlator_t *this, uuid_t gfid, const char *name,
- void *gfid_req);
+afr_selfheal_metadata(call_frame_t *frame, xlator_t *this, inode_t *inode);
int
-afr_selfheal_data (call_frame_t *frame, xlator_t *this, inode_t *inode);
+afr_selfheal_entry(call_frame_t *frame, xlator_t *this, inode_t *inode);
int
-afr_selfheal_metadata (call_frame_t *frame, xlator_t *this, inode_t *inode);
+afr_lookup_and_heal_gfid(xlator_t *this, inode_t *parent, const char *name,
+ inode_t *inode, struct afr_reply *replies, int source,
+ unsigned char *sources, void *gfid, int *gfid_idx);
int
-afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode);
+afr_selfheal_inodelk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, off_t off, size_t size,
+ unsigned char *locked_on);
+int
+afr_selfheal_tryinodelk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, off_t off, size_t size,
+ unsigned char *locked_on);
int
-afr_selfheal_inodelk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, off_t off, size_t size,
- unsigned char *locked_on);
+afr_selfheal_tie_breaker_inodelk(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, char *dom, off_t off,
+ size_t size, unsigned char *locked_on);
int
-afr_selfheal_tryinodelk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, off_t off, size_t size,
- unsigned char *locked_on);
+afr_selfheal_uninodelk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, off_t off, size_t size,
+ const unsigned char *locked_on);
int
-afr_selfheal_uninodelk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, off_t off, size_t size,
- const unsigned char *locked_on);
+afr_selfheal_entrylk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, const char *name, unsigned char *locked_on);
int
-afr_selfheal_entrylk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, const char *name, unsigned char *locked_on);
+afr_selfheal_tryentrylk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, const char *name, unsigned char *locked_on);
int
-afr_selfheal_tryentrylk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, const char *name, unsigned char *locked_on);
+afr_selfheal_tie_breaker_entrylk(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, char *dom, const char *name,
+ unsigned char *locked_on);
int
-afr_selfheal_unentrylk (call_frame_t *frame, xlator_t *this, inode_t *inode,
- char *dom, const char *name, unsigned char *locked_on);
+afr_selfheal_unentrylk(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ char *dom, const char *name, unsigned char *locked_on,
+ dict_t *xdata);
int
-afr_selfheal_unlocked_discover (call_frame_t *frame, inode_t *inode,
- uuid_t gfid, struct afr_reply *replies);
+afr_selfheal_unlocked_discover(call_frame_t *frame, inode_t *inode, uuid_t gfid,
+ struct afr_reply *replies);
+int
+afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
+ uuid_t gfid, struct afr_reply *replies,
+ unsigned char *discover_on, dict_t *dict);
inode_t *
-afr_selfheal_unlocked_lookup_on (call_frame_t *frame, inode_t *parent,
- const char *name, struct afr_reply *replies,
- unsigned char *lookup_on, dict_t *xattr);
+afr_selfheal_unlocked_lookup_on(call_frame_t *frame, inode_t *parent,
+ const char *name, struct afr_reply *replies,
+ unsigned char *lookup_on, dict_t *xattr);
int
-afr_selfheal_find_direction (call_frame_t *frame, xlator_t *this,
- struct afr_reply *replies,
- afr_transaction_type type,
- unsigned char *locked_on, unsigned char *sources,
- unsigned char *sinks, uint64_t *witness,
- gf_boolean_t *flag);
+afr_selfheal_find_direction(call_frame_t *frame, xlator_t *this,
+ struct afr_reply *replies,
+ afr_transaction_type type, unsigned char *locked_on,
+ unsigned char *sources, unsigned char *sinks,
+ uint64_t *witness, unsigned char *flag);
int
-afr_selfheal_fill_matrix (xlator_t *this, int **matrix, int subvol, int idx,
- dict_t *xdata);
+afr_selfheal_fill_matrix(xlator_t *this, int **matrix, int subvol, int idx,
+ dict_t *xdata);
int
-afr_selfheal_extract_xattr (xlator_t *this, struct afr_reply *replies,
- afr_transaction_type type, int *dirty, int **matrix);
+afr_selfheal_extract_xattr(xlator_t *this, struct afr_reply *replies,
+ afr_transaction_type type, int *dirty, int **matrix);
int
-afr_selfheal_undo_pending (call_frame_t *frame, xlator_t *this, inode_t *inode,
- unsigned char *sources, unsigned char *sinks,
- unsigned char *healed_sinks, afr_transaction_type type,
- struct afr_reply *replies, unsigned char *locked_on);
+afr_sh_generic_fop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *pre,
+ struct iatt *post, dict_t *xdata);
+
+int
+afr_selfheal_restore_time(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ int source, unsigned char *healed_sinks,
+ struct afr_reply *replies);
+int
+afr_selfheal_undo_pending(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ unsigned char *sources, unsigned char *sinks,
+ unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ afr_transaction_type type, struct afr_reply *replies,
+ unsigned char *locked_on);
int
-afr_selfheal_recreate_entry (xlator_t *this, int dst, int source, inode_t *dir,
- const char *name, inode_t *inode,
- struct afr_reply *replies,
- unsigned char *newentry);
+afr_selfheal_recreate_entry(call_frame_t *frame, int dst, int source,
+ unsigned char *sources, inode_t *dir,
+ const char *name, inode_t *inode,
+ struct afr_reply *replies);
int
-afr_selfheal_post_op (call_frame_t *frame, xlator_t *this, inode_t *inode,
- int subvol, dict_t *xattr);
+afr_selfheal_post_op(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ int subvol, dict_t *xattr, dict_t *xdata);
call_frame_t *
-afr_frame_create (xlator_t *this);
+afr_frame_create(xlator_t *this, int32_t *op_errno);
inode_t *
-afr_inode_find (xlator_t *this, uuid_t gfid);
+afr_inode_find(xlator_t *this, uuid_t gfid);
int
-afr_selfheal_discover_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, inode_t *inode,
- struct iatt *buf, dict_t *xdata,
- struct iatt *parbuf);
+afr_selfheal_discover_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata, struct iatt *parbuf);
+void
+afr_reply_copy(struct afr_reply *dst, struct afr_reply *src);
void
-afr_replies_copy (struct afr_reply *dst, struct afr_reply *src, int count);
+afr_replies_copy(struct afr_reply *dst, struct afr_reply *src, int count);
int
-afr_selfheal_newentry_mark (call_frame_t *frame, xlator_t *this, inode_t *inode,
- int source, struct afr_reply *replies,
- unsigned char *sources, unsigned char *newentry);
+afr_selfheal_newentry_mark(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ int source, struct afr_reply *replies,
+ unsigned char *sources, unsigned char *newentry);
unsigned int
-afr_success_count (struct afr_reply *replies, unsigned int count);
+afr_success_count(struct afr_reply *replies, unsigned int count);
void
-afr_log_selfheal (uuid_t gfid, xlator_t *this, int ret, char *type,
- int source, unsigned char *healed_sinks);
+afr_log_selfheal(uuid_t gfid, xlator_t *this, int ret, char *type, int source,
+ unsigned char *sources, unsigned char *healed_sinks);
void
-afr_mark_largest_file_as_source (xlator_t *this, unsigned char *sources,
- struct afr_reply *replies);
+afr_mark_largest_file_as_source(xlator_t *this, unsigned char *sources,
+ struct afr_reply *replies);
void
-afr_mark_active_sinks (xlator_t *this, unsigned char *sources,
- unsigned char *locked_on, unsigned char *sinks);
+afr_mark_active_sinks(xlator_t *this, unsigned char *sources,
+ unsigned char *locked_on, unsigned char *sinks);
gf_boolean_t
-afr_dict_contains_heal_op (call_frame_t *frame);
+afr_dict_contains_heal_op(call_frame_t *frame);
+gf_boolean_t
+afr_can_decide_split_brain_source_sinks(struct afr_reply *replies,
+ int child_count);
int
-afr_mark_split_brain_source_sinks (call_frame_t *frame, xlator_t *this,
- unsigned char *sources,
- unsigned char *sinks,
- unsigned char *healed_sinks,
- unsigned char *locked_on,
- struct afr_reply *replies,
- afr_transaction_type type);
+afr_mark_split_brain_source_sinks(
+ call_frame_t *frame, xlator_t *this, inode_t *inode, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks, unsigned char *locked_on,
+ struct afr_reply *replies, afr_transaction_type type);
int
-afr_get_child_index_from_name (xlator_t *this, char *name);
+afr_sh_get_fav_by_policy(xlator_t *this, struct afr_reply *replies,
+ inode_t *inode, char **policy_str);
+
+int
+_afr_fav_child_reset_sink_xattrs(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, int source,
+ unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ afr_transaction_type type,
+ unsigned char *locked_on,
+ struct afr_reply *replies);
+
+int
+afr_get_child_index_from_name(xlator_t *this, char *name);
gf_boolean_t
-afr_does_witness_exist (xlator_t *this, uint64_t *witness);
+afr_does_witness_exist(xlator_t *this, uint64_t *witness);
+
+int
+__afr_selfheal_data_prepare(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ unsigned char *locked_on, unsigned char *sources,
+ unsigned char *sinks, unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ struct afr_reply *replies, unsigned char *flag);
int
-__afr_selfheal_data_prepare (call_frame_t *frame, xlator_t *this,
+__afr_selfheal_metadata_prepare(call_frame_t *frame, xlator_t *this,
+ inode_t *inode, unsigned char *locked_on,
+ unsigned char *sources, unsigned char *sinks,
+ unsigned char *healed_sinks,
+ unsigned char *undid_pending,
+ struct afr_reply *replies, unsigned char *flag);
+int
+__afr_selfheal_entry_prepare(call_frame_t *frame, xlator_t *this,
inode_t *inode, unsigned char *locked_on,
- unsigned char *sources,
- unsigned char *sinks, unsigned char *healed_sinks,
- struct afr_reply *replies,
- gf_boolean_t *flag);
+ unsigned char *sources, unsigned char *sinks,
+ unsigned char *healed_sinks,
+ struct afr_reply *replies, int *source_p,
+ unsigned char *flag);
int
-__afr_selfheal_metadata_prepare (call_frame_t *frame, xlator_t *this,
- inode_t *inode, unsigned char *locked_on,
- unsigned char *sources,
- unsigned char *sinks,
- unsigned char *healed_sinks,
- struct afr_reply *replies,
- gf_boolean_t *flag);
+afr_selfheal_unlocked_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+ inode_t **link_inode, gf_boolean_t *data_selfheal,
+ gf_boolean_t *metadata_selfheal,
+ gf_boolean_t *entry_selfheal,
+ struct afr_reply *replies);
+
int
-__afr_selfheal_entry_prepare (call_frame_t *frame, xlator_t *this,
- inode_t *inode, unsigned char *locked_on,
- unsigned char *sources,
- unsigned char *sinks,
- unsigned char *healed_sinks,
- struct afr_reply *replies, int *source_p,
- gf_boolean_t *flag);
+afr_selfheal_do(call_frame_t *frame, xlator_t *this, uuid_t gfid);
int
-afr_selfheal_unlocked_inspect (call_frame_t *frame, xlator_t *this,
- uuid_t gfid, inode_t **link_inode,
- gf_boolean_t *data_selfheal,
- gf_boolean_t *metadata_selfheal,
- gf_boolean_t *entry_selfheal);
+afr_selfheal_lock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, dict_t *xdata);
int
-afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid);
+afr_locked_fill(call_frame_t *frame, xlator_t *this, unsigned char *locked_on);
+int
+afr_choose_source_by_policy(afr_private_t *priv, unsigned char *sources,
+ afr_transaction_type type);
int
-afr_selfheal_lock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, dict_t *xdata);
+afr_selfheal_metadata_by_stbuf(xlator_t *this, struct iatt *stbuf);
+
+int
+afr_sh_fav_by_size(xlator_t *this, struct afr_reply *replies, inode_t *inode);
+int
+afr_sh_fav_by_mtime(xlator_t *this, struct afr_reply *replies, inode_t *inode);
+int
+afr_sh_fav_by_ctime(xlator_t *this, struct afr_reply *replies, inode_t *inode);
+
+int
+afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+ inode_t *inode, uuid_t pargfid, const char *bname,
+ int src_idx, int child_idx,
+ unsigned char *locked_on, int *src, dict_t *xdata);
+int
+afr_mark_source_sinks_if_file_empty(xlator_t *this, unsigned char *sources,
+ unsigned char *sinks,
+ unsigned char *healed_sinks,
+ unsigned char *locked_on,
+ struct afr_reply *replies,
+ afr_transaction_type type);
+
+gf_boolean_t
+afr_is_file_empty_on_all_children(afr_private_t *priv,
+ struct afr_reply *replies);
int
-afr_locked_fill (call_frame_t *frame, xlator_t *this,
- unsigned char *locked_on);
+afr_selfheal_entry_delete(xlator_t *this, inode_t *dir, const char *name,
+ inode_t *inode, int child, struct afr_reply *replies);
int
-afr_choose_source_by_policy (afr_private_t *priv, unsigned char *sources,
- afr_transaction_type type);
+afr_anon_inode_create(xlator_t *this, int child, inode_t **linked_inode);
#endif /* !_AFR_SELFHEAL_H */
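
Beyond the reformatting, the header diff above reworks the fan-out macros: `AFR_ONALL` and `AFR_ONLIST` now copy the live `child_up[]`/`list[]` into a stack snapshot, derive the expected reply count with `AFR_COUNT()` up front, and publish it in `__local->barrier.waitfor` before the first `STACK_WIND_COOKIE()`, instead of incrementing `__count` while winding. A subvolume flapping mid-loop, or a callback arriving before the loop finishes, therefore cannot leave `syncbarrier_wait()` expecting the wrong number of replies. `AFR_SH_MIN_PARTICIPANTS` is dropped (callers now compare against `priv->child_count`), and the new `S*` string macros carry the split-brain CLI messages. The toy barrier below illustrates the snapshot-then-publish pattern in isolation; every name in it is invented for this example and none of it is GlusterFS code.

```c
/* Toy illustration of the snapshot-then-publish pattern used by the
 * reworked AFR_ONALL/AFR_ONLIST macros. All names are invented; this is
 * not GlusterFS code. The caller is assumed to have initialized lock and
 * cond (e.g. PTHREAD_MUTEX_INITIALIZER / PTHREAD_COND_INITIALIZER). */
#include <pthread.h>
#include <string.h>

#define MAX_CHILDREN 16

struct toy_barrier {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             waitfor; /* replies expected for this fan-out */
    int             count;   /* replies received so far */
};

/* Called from each reply callback. */
static void
toy_barrier_wake(struct toy_barrier *b)
{
    pthread_mutex_lock(&b->lock);
    if (++b->count >= b->waitfor)
        pthread_cond_signal(&b->cond);
    pthread_mutex_unlock(&b->lock);
}

/* Called by the issuer after winding all requests. */
static void
toy_barrier_wait(struct toy_barrier *b)
{
    pthread_mutex_lock(&b->lock);
    while (b->count < b->waitfor)
        pthread_cond_wait(&b->cond, &b->lock);
    b->count = 0;
    pthread_mutex_unlock(&b->lock);
}

static void
fan_out(struct toy_barrier *b, const unsigned char *child_up, int children,
        void (*wind)(int child, struct toy_barrier *b))
{
    unsigned char snapshot[MAX_CHILDREN]; /* assumes children <= MAX_CHILDREN */
    int i, expected = 0;

    /* 1. Snapshot: the live child_up[] may change while we wind. */
    memcpy(snapshot, child_up, children);
    for (i = 0; i < children; i++)
        expected += snapshot[i] ? 1 : 0;

    /* 2. Publish the expected count before the first request goes out,
     *    so an early callback compares against the final value. */
    pthread_mutex_lock(&b->lock);
    b->count = 0;
    b->waitfor = expected;
    pthread_mutex_unlock(&b->lock);

    /* 3. Wind only to the snapshotted children, then wait. */
    for (i = 0; i < children; i++)
        if (snapshot[i])
            wind(i, b); /* each wind eventually calls toy_barrier_wake() */

    toy_barrier_wait(b);
}
```
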
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
index 1ae4f18e764..109fd4b7421 100644
--- a/xlators/cluster/afr/src/afr-self-heald.c
+++ b/xlators/cluster/afr/src/afr-self-heald.c
@@ -8,1171 +8,1709 @@
cases as published by the Free Software Foundation.
*/
-
#include "afr.h"
#include "afr-self-heal.h"
#include "afr-self-heald.h"
#include "protocol-common.h"
-#include "syncop-utils.h"
+#include <glusterfs/syncop-utils.h>
#include "afr-messages.h"
-
-#define SHD_INODE_LRU_LIMIT 2048
-#define AFR_EH_SPLIT_BRAIN_LIMIT 1024
-#define AFR_STATISTICS_HISTORY_SIZE 50
-
-
-#define ASSERT_LOCAL(this, healer) \
- if (!afr_shd_is_subvol_local(this, healer->subvol)) { \
- healer->local = _gf_false; \
- if (safe_break (healer)) { \
- break; \
- } else { \
- continue; \
- } \
- } else { \
- healer->local = _gf_true; \
- }
-
-
-#define NTH_INDEX_HEALER(this, n) &((((afr_private_t *)this->private))->shd.index_healers[n])
-#define NTH_FULL_HEALER(this, n) &((((afr_private_t *)this->private))->shd.full_healers[n])
+#include <glusterfs/byte-order.h>
+
+#define AFR_EH_SPLIT_BRAIN_LIMIT 1024
+#define AFR_STATISTICS_HISTORY_SIZE 50
+
+#define ASSERT_LOCAL(this, healer) \
+ if (!afr_shd_is_subvol_local(this, healer->subvol)) { \
+ healer->local = _gf_false; \
+ if (safe_break(healer)) { \
+ break; \
+ } else { \
+ continue; \
+ } \
+ } else { \
+ healer->local = _gf_true; \
+ }
+
+#define NTH_INDEX_HEALER(this, n) \
+ &((((afr_private_t *)this->private))->shd.index_healers[n])
+#define NTH_FULL_HEALER(this, n) \
+ &((((afr_private_t *)this->private))->shd.full_healers[n])
char *
-afr_subvol_name (xlator_t *this, int subvol)
+afr_subvol_name(xlator_t *this, int subvol)
{
- afr_private_t *priv = NULL;
+ afr_private_t *priv = NULL;
- priv = this->private;
- if (subvol < 0 || subvol > priv->child_count)
- return NULL;
+ priv = this->private;
+ if (subvol < 0 || subvol > priv->child_count)
+ return NULL;
- return priv->children[subvol]->name;
+ return priv->children[subvol]->name;
}
-
void
-afr_destroy_crawl_event_data (void *data)
+afr_destroy_crawl_event_data(void *data)
{
- return;
+ return;
}
-
void
-afr_destroy_shd_event_data (void *data)
+afr_destroy_shd_event_data(void *data)
{
- shd_event_t *shd_event = data;
-
- if (!shd_event)
- return;
- GF_FREE (shd_event->path);
+ shd_event_t *shd_event = data;
+ if (!shd_event)
return;
-}
+ GF_FREE(shd_event->path);
+ return;
+}
gf_boolean_t
-afr_shd_is_subvol_local (xlator_t *this, int subvol)
+afr_shd_is_subvol_local(xlator_t *this, int subvol)
{
- afr_private_t *priv = NULL;
- gf_boolean_t is_local = _gf_false;
- loc_t loc = {0, };
-
- loc.inode = this->itable->root;
- gf_uuid_copy (loc.gfid, loc.inode->gfid);
- priv = this->private;
- syncop_is_subvol_local(priv->children[subvol], &loc, &is_local);
- return is_local;
+ afr_private_t *priv = NULL;
+ gf_boolean_t is_local = _gf_false;
+ loc_t loc = {
+ 0,
+ };
+
+ loc.inode = this->itable->root;
+ gf_uuid_copy(loc.gfid, loc.inode->gfid);
+ priv = this->private;
+ syncop_is_subvol_local(priv->children[subvol], &loc, &is_local);
+ return is_local;
}
-
int
-__afr_shd_healer_wait (struct subvol_healer *healer)
+__afr_shd_healer_wait(struct subvol_healer *healer)
{
- afr_private_t *priv = NULL;
- struct timespec wait_till = {0, };
- int ret = 0;
+ afr_private_t *priv = NULL;
+ struct timespec wait_till = {
+ 0,
+ };
+ int ret = 0;
- priv = healer->this->private;
+ priv = healer->this->private;
disabled_loop:
- wait_till.tv_sec = time (NULL) + priv->shd.timeout;
+ wait_till.tv_sec = gf_time() + priv->shd.timeout;
- while (!healer->rerun) {
- ret = pthread_cond_timedwait (&healer->cond,
- &healer->mutex,
- &wait_till);
- if (ret == ETIMEDOUT)
- break;
- }
+ while (!healer->rerun) {
+ ret = pthread_cond_timedwait(&healer->cond, &healer->mutex, &wait_till);
+ if (ret == ETIMEDOUT)
+ break;
+ }
- ret = healer->rerun;
- healer->rerun = 0;
+ ret = healer->rerun;
+ healer->rerun = 0;
- if (!priv->shd.enabled)
- goto disabled_loop;
+ if (!priv->shd.enabled)
+ goto disabled_loop;
- return ret;
+ return ret;
}
-
int
-afr_shd_healer_wait (struct subvol_healer *healer)
+afr_shd_healer_wait(struct subvol_healer *healer)
{
- int ret = 0;
+ int ret = 0;
- pthread_mutex_lock (&healer->mutex);
- {
- ret = __afr_shd_healer_wait (healer);
- }
- pthread_mutex_unlock (&healer->mutex);
+ pthread_mutex_lock(&healer->mutex);
+ {
+ ret = __afr_shd_healer_wait(healer);
+ }
+ pthread_mutex_unlock(&healer->mutex);
- return ret;
+ return ret;
}
-
gf_boolean_t
-safe_break (struct subvol_healer *healer)
+safe_break(struct subvol_healer *healer)
{
- gf_boolean_t ret = _gf_false;
+ gf_boolean_t ret = _gf_false;
- pthread_mutex_lock (&healer->mutex);
- {
- if (healer->rerun)
- goto unlock;
+ pthread_mutex_lock(&healer->mutex);
+ {
+ if (healer->rerun)
+ goto unlock;
- healer->running = _gf_false;
- ret = _gf_true;
- }
+ healer->running = _gf_false;
+ ret = _gf_true;
+ }
unlock:
- pthread_mutex_unlock (&healer->mutex);
+ pthread_mutex_unlock(&healer->mutex);
- return ret;
+ return ret;
}
-
inode_t *
-afr_shd_inode_find (xlator_t *this, xlator_t *subvol, uuid_t gfid)
+afr_shd_inode_find(xlator_t *this, xlator_t *subvol, uuid_t gfid)
{
- inode_t *inode = NULL;
- int ret = 0;
- loc_t loc = {0, };
- struct iatt iatt = {0, };
-
- inode = inode_find (this->itable, gfid);
- if (inode)
- goto out;
-
- loc.inode = inode_new (this->itable);
- if (!loc.inode)
- goto out;
- gf_uuid_copy (loc.gfid, gfid);
-
- ret = syncop_lookup (subvol, &loc, &iatt, NULL, NULL, NULL);
- if (ret < 0)
- goto out;
-
- inode = inode_link (loc.inode, NULL, NULL, &iatt);
+ int ret = 0;
+ uint64_t val = IA_INVAL;
+ dict_t *xdata = NULL;
+ dict_t *rsp_dict = NULL;
+ inode_t *inode = NULL;
+
+ xdata = dict_new();
+ if (!xdata)
+ goto out;
+
+ ret = dict_set_int8(xdata, GF_INDEX_IA_TYPE_GET_REQ, 1);
+ if (ret)
+ goto out;
+
+ ret = syncop_inode_find(this, subvol, gfid, &inode, xdata, &rsp_dict);
+ if (ret < 0)
+ goto out;
+
+ if (rsp_dict) {
+ ret = dict_get_uint64(rsp_dict, GF_INDEX_IA_TYPE_GET_RSP, &val);
+ if (ret)
+ goto out;
+ }
+ ret = inode_ctx_set2(inode, subvol, 0, &val);
out:
- loc_wipe (&loc);
- return inode;
+ if (ret && inode) {
+ inode_unref(inode);
+ inode = NULL;
+ }
+ if (xdata)
+ dict_unref(xdata);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+ return inode;
}
-inode_t*
-afr_shd_index_inode (xlator_t *this, xlator_t *subvol, char *vgfid)
+inode_t *
+afr_shd_index_inode(xlator_t *this, xlator_t *subvol, char *vgfid)
{
- loc_t rootloc = {0, };
- inode_t *inode = NULL;
- int ret = 0;
- dict_t *xattr = NULL;
- void *index_gfid = NULL;
+ loc_t rootloc = {
+ 0,
+ };
+ inode_t *inode = NULL;
+ int ret = 0;
+ dict_t *xattr = NULL;
+ void *index_gfid = NULL;
- rootloc.inode = inode_ref (this->itable->root);
- gf_uuid_copy (rootloc.gfid, rootloc.inode->gfid);
+ rootloc.inode = inode_ref(this->itable->root);
+ gf_uuid_copy(rootloc.gfid, rootloc.inode->gfid);
- ret = syncop_getxattr (subvol, &rootloc, &xattr,
- vgfid, NULL, NULL);
- if (ret || !xattr) {
- errno = -ret;
- goto out;
- }
+ ret = syncop_getxattr(subvol, &rootloc, &xattr, vgfid, NULL, NULL);
+ if (ret || !xattr) {
+ errno = -ret;
+ goto out;
+ }
- ret = dict_get_ptr (xattr, vgfid, &index_gfid);
- if (ret)
- goto out;
+ ret = dict_get_ptr(xattr, vgfid, &index_gfid);
+ if (ret)
+ goto out;
- gf_msg_debug (this->name, 0, "%s dir gfid for %s: %s",
- vgfid, subvol->name, uuid_utoa (index_gfid));
+ gf_msg_debug(this->name, 0, "%s dir gfid for %s: %s", vgfid, subvol->name,
+ uuid_utoa(index_gfid));
- inode = afr_shd_inode_find (this, subvol, index_gfid);
+ inode = afr_shd_inode_find(this, subvol, index_gfid);
out:
- loc_wipe (&rootloc);
+ loc_wipe(&rootloc);
- if (xattr)
- dict_unref (xattr);
+ if (xattr)
+ dict_unref(xattr);
- return inode;
+ return inode;
}
int
-afr_shd_index_purge (xlator_t *subvol, inode_t *inode, char *name)
+afr_shd_entry_purge(xlator_t *subvol, inode_t *inode, char *name,
+ ia_type_t type)
{
- loc_t loc = {0, };
- int ret = 0;
+ int ret = 0;
+ loc_t loc = {
+ 0,
+ };
- loc.parent = inode_ref (inode);
- loc.name = name;
+ loc.parent = inode_ref(inode);
+ loc.name = name;
- ret = syncop_unlink (subvol, &loc, NULL, NULL);
+ if (IA_ISDIR(type))
+ ret = syncop_rmdir(subvol, &loc, 1, NULL, NULL);
+ else
+ ret = syncop_unlink(subvol, &loc, NULL, NULL);
- loc_wipe (&loc);
- return ret;
+ loc_wipe(&loc);
+ return ret;
}
void
-afr_shd_zero_xattrop (xlator_t *this, uuid_t gfid)
+afr_shd_zero_xattrop(xlator_t *this, uuid_t gfid)
{
-
- call_frame_t *frame = NULL;
- inode_t *inode = NULL;
- afr_private_t *priv = NULL;
- dict_t *xattr = NULL;
- int ret = 0;
- int i = 0;
- int raw[AFR_NUM_CHANGE_LOGS] = {0};
-
- priv = this->private;
- frame = afr_frame_create (this);
- if (!frame)
- goto out;
- inode = afr_inode_find (this, gfid);
- if (!inode)
- goto out;
- xattr = dict_new();
- if (!xattr)
- goto out;
- ret = dict_set_static_bin (xattr, AFR_DIRTY, raw,
- sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ call_frame_t *frame = NULL;
+ inode_t *inode = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xattr = NULL;
+ int ret = 0;
+ int i = 0;
+ int raw[AFR_NUM_CHANGE_LOGS] = {0};
+
+ priv = this->private;
+ frame = afr_frame_create(this, NULL);
+ if (!frame)
+ goto out;
+ inode = afr_inode_find(this, gfid);
+ if (!inode)
+ goto out;
+ xattr = dict_new();
+ if (!xattr)
+ goto out;
+ ret = dict_set_static_bin(xattr, AFR_DIRTY, raw,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ if (ret)
+ goto out;
+ for (i = 0; i < priv->child_count; i++) {
+ ret = dict_set_static_bin(xattr, priv->pending_key[i], raw,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
if (ret)
- goto out;
- for (i = 0; i < priv->child_count; i++) {
- ret = dict_set_static_bin (xattr, priv->pending_key[i], raw,
- sizeof(int) * AFR_NUM_CHANGE_LOGS);
- if (ret)
- goto out;
- }
+ goto out;
+ }
- /*Send xattrop to all bricks. Doing a lookup to see if bricks are up or
- * has valid repies for this gfid seems a bit of an overkill.*/
- for (i = 0; i < priv->child_count; i++)
- afr_selfheal_post_op (frame, this, inode, i, xattr);
+ /*Send xattrop to all bricks. Doing a lookup to see if bricks are up or
+ * has valid repies for this gfid seems a bit of an overkill.*/
+ for (i = 0; i < priv->child_count; i++)
+ afr_selfheal_post_op(frame, this, inode, i, xattr, NULL);
out:
- if (frame)
- AFR_STACK_DESTROY (frame);
- if (inode)
- inode_unref (inode);
- if (xattr)
- dict_unref (xattr);
- return;
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ if (inode)
+ inode_unref(inode);
+ if (xattr)
+ dict_unref(xattr);
+ return;
}
int
-afr_shd_selfheal_name (struct subvol_healer *healer, int child, uuid_t parent,
- const char *bname)
+afr_shd_selfheal_name(struct subvol_healer *healer, int child, uuid_t parent,
+ const char *bname)
{
- int ret = -1;
+ int ret = -1;
- ret = afr_selfheal_name (THIS, parent, bname, NULL);
+ ret = afr_selfheal_name(THIS, parent, bname, NULL, NULL);
- return ret;
+ return ret;
}
int
-afr_shd_selfheal (struct subvol_healer *healer, int child, uuid_t gfid)
+afr_shd_selfheal(struct subvol_healer *healer, int child, uuid_t gfid)
{
- int ret = 0;
- eh_t *eh = NULL;
- afr_private_t *priv = NULL;
- afr_self_heald_t *shd = NULL;
- shd_event_t *shd_event = NULL;
- char *path = NULL;
- xlator_t *subvol = NULL;
- xlator_t *this = NULL;
- crawl_event_t *crawl_event = NULL;
-
- this = healer->this;
- priv = this->private;
- shd = &priv->shd;
- crawl_event = &healer->crawl_event;
-
- subvol = priv->children[child];
-
- //If this fails with ENOENT/ESTALE index is stale
- ret = syncop_gfid_to_path (this->itable, subvol, gfid, &path);
- if (ret < 0)
- return ret;
-
- ret = afr_selfheal (this, gfid);
-
- LOCK (&priv->lock);
- {
- if (ret == -EIO) {
- eh = shd->split_brain;
- crawl_event->split_brain_count++;
- } else if (ret < 0) {
- crawl_event->heal_failed_count++;
- } else if (ret == 0) {
- crawl_event->healed_count++;
- }
+ int ret = 0;
+ eh_t *eh = NULL;
+ afr_private_t *priv = NULL;
+ afr_self_heald_t *shd = NULL;
+ shd_event_t *shd_event = NULL;
+ char *path = NULL;
+ xlator_t *subvol = NULL;
+ xlator_t *this = NULL;
+ crawl_event_t *crawl_event = NULL;
+
+ this = healer->this;
+ priv = this->private;
+ shd = &priv->shd;
+ crawl_event = &healer->crawl_event;
+
+ subvol = priv->children[child];
+
+ // If this fails with ENOENT/ESTALE index is stale
+ ret = syncop_gfid_to_path(this->itable, subvol, gfid, &path);
+ if (ret < 0)
+ return ret;
+
+ ret = afr_selfheal(this, gfid);
+
+ LOCK(&priv->lock);
+ {
+ if (ret == -EIO) {
+ eh = shd->split_brain;
+ crawl_event->split_brain_count++;
+ } else if (ret < 0) {
+ crawl_event->heal_failed_count++;
+ } else if (ret == 0) {
+ crawl_event->healed_count++;
}
- UNLOCK (&priv->lock);
+ }
+ UNLOCK(&priv->lock);
- if (eh) {
- shd_event = GF_CALLOC (1, sizeof(*shd_event),
- gf_afr_mt_shd_event_t);
- if (!shd_event)
- goto out;
+ if (eh) {
+ shd_event = GF_CALLOC(1, sizeof(*shd_event), gf_afr_mt_shd_event_t);
+ if (!shd_event)
+ goto out;
- shd_event->child = child;
- shd_event->path = path;
+ shd_event->child = child;
+ shd_event->path = path;
- if (eh_save_history (eh, shd_event) < 0)
- goto out;
+ if (eh_save_history(eh, shd_event) < 0)
+ goto out;
- shd_event = NULL;
- path = NULL;
- }
+ shd_event = NULL;
+ path = NULL;
+ }
out:
- GF_FREE (shd_event);
- GF_FREE (path);
- return ret;
+ GF_FREE(shd_event);
+ GF_FREE(path);
+ return ret;
}
-
void
-afr_shd_sweep_prepare (struct subvol_healer *healer)
+afr_shd_sweep_prepare(struct subvol_healer *healer)
{
- crawl_event_t *event = NULL;
+ crawl_event_t *event = NULL;
- event = &healer->crawl_event;
+ event = &healer->crawl_event;
- event->healed_count = 0;
- event->split_brain_count = 0;
- event->heal_failed_count = 0;
+ event->healed_count = 0;
+ event->split_brain_count = 0;
+ event->heal_failed_count = 0;
- time (&event->start_time);
- event->end_time = 0;
+ event->start_time = gf_time();
+ event->end_time = 0;
+ _mask_cancellation();
}
-
void
-afr_shd_sweep_done (struct subvol_healer *healer)
+afr_shd_sweep_done(struct subvol_healer *healer)
{
- crawl_event_t *event = NULL;
- crawl_event_t *history = NULL;
- afr_self_heald_t *shd = NULL;
+ crawl_event_t *event = NULL;
+ crawl_event_t *history = NULL;
+ afr_self_heald_t *shd = NULL;
- event = &healer->crawl_event;
- shd = &(((afr_private_t *)healer->this->private)->shd);
+ event = &healer->crawl_event;
+ shd = &(((afr_private_t *)healer->this->private)->shd);
- time (&event->end_time);
- history = memdup (event, sizeof (*event));
- event->start_time = 0;
+ event->end_time = gf_time();
+ history = gf_memdup(event, sizeof(*event));
+ event->start_time = 0;
- if (!history)
- return;
+ if (!history)
+ return;
- if (eh_save_history (shd->statistics[healer->subvol], history) < 0)
- GF_FREE (history);
+ if (eh_save_history(shd->statistics[healer->subvol], history) < 0)
+ GF_FREE(history);
+ _unmask_cancellation();
}
int
-afr_shd_index_heal (xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
- void *data)
+afr_shd_index_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+ void *data)
{
- struct subvol_healer *healer = data;
- afr_private_t *priv = NULL;
- uuid_t gfid = {0};
- int ret = 0;
+ struct subvol_healer *healer = data;
+ afr_private_t *priv = NULL;
+ uuid_t gfid = {0};
+ int ret = 0;
+ uint64_t val = IA_INVAL;
- priv = healer->this->private;
- if (!priv->shd.enabled)
- return -EBUSY;
+ priv = healer->this->private;
+ if (!priv->shd.enabled)
+ return -EBUSY;
- gf_msg_debug (healer->this->name, 0, "got entry: %s",
- entry->d_name);
+ gf_msg_debug(healer->this->name, 0, "got entry: %s from %s", entry->d_name,
+ priv->children[healer->subvol]->name);
- ret = gf_uuid_parse (entry->d_name, gfid);
- if (ret)
- return 0;
+ ret = gf_uuid_parse(entry->d_name, gfid);
+ if (ret)
+ return 0;
- ret = afr_shd_selfheal (healer, healer->subvol, gfid);
+ inode_ctx_get2(parent->inode, subvol, NULL, &val);
- if (ret == -ENOENT || ret == -ESTALE)
- afr_shd_index_purge (subvol, parent->inode, entry->d_name);
- if (ret == 2)
- /* If bricks crashed in pre-op after creating indices/xattrop
- * link but before setting afr changelogs, we end up with stale
- * xattrop links but zero changelogs. Remove such entries by
- * sending a post-op with zero changelogs.
- */
- afr_shd_zero_xattrop (healer->this, gfid);
+ ret = afr_shd_selfheal(healer, healer->subvol, gfid);
- return 0;
+ if (ret == -ENOENT || ret == -ESTALE)
+ afr_shd_entry_purge(subvol, parent->inode, entry->d_name, val);
+
+ if (ret == 2)
+ /* If bricks crashed in pre-op after creating indices/xattrop
+ * link but before setting afr changelogs, we end up with stale
+ * xattrop links but zero changelogs. Remove such entries by
+ * sending a post-op with zero changelogs.
+ */
+ afr_shd_zero_xattrop(healer->this, gfid);
+
+ return 0;
}
int
-afr_shd_index_sweep (struct subvol_healer *healer, char *vgfid)
+afr_shd_index_sweep(struct subvol_healer *healer, char *vgfid)
{
- loc_t loc = {0};
- afr_private_t *priv = NULL;
- int ret = 0;
- xlator_t *subvol = NULL;
- dict_t *xdata = NULL;
-
- priv = healer->this->private;
- subvol = priv->children[healer->subvol];
-
- loc.inode = afr_shd_index_inode (healer->this, subvol, vgfid);
- if (!loc.inode) {
- gf_msg (healer->this->name, GF_LOG_WARNING,
- 0, AFR_MSG_INDEX_DIR_GET_FAILED,
- "unable to get index-dir on %s", subvol->name);
- ret = -errno;
- goto out;
- }
-
- xdata = dict_new ();
- if (!xdata || dict_set_int32 (xdata, "get-gfid-type", 1)) {
- ret = -ENOMEM;
- goto out;
- }
+ loc_t loc = {0};
+ afr_private_t *priv = NULL;
+ int ret = 0;
+ xlator_t *subvol = NULL;
+ dict_t *xdata = NULL;
+ call_frame_t *frame = NULL;
+
+ priv = healer->this->private;
+ subvol = priv->children[healer->subvol];
+
+ frame = afr_frame_create(healer->this, &ret);
+ if (!frame) {
+ ret = -ret;
+ goto out;
+ }
+
+ loc.inode = afr_shd_index_inode(healer->this, subvol, vgfid);
+ if (!loc.inode) {
+ gf_msg(healer->this->name, GF_LOG_WARNING, 0,
+ AFR_MSG_INDEX_DIR_GET_FAILED, "unable to get index-dir on %s",
+ subvol->name);
+ ret = -errno;
+ goto out;
+ }
+
+ xdata = dict_new();
+ if (!xdata || dict_set_int32_sizen(xdata, "get-gfid-type", 1)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = syncop_mt_dir_scan(frame, subvol, &loc, GF_CLIENT_PID_SELF_HEALD,
+ healer, afr_shd_index_heal, xdata,
+ priv->shd.max_threads, priv->shd.wait_qlength);
+
+ if (ret == 0)
+ ret = healer->crawl_event.healed_count;
- ret = syncop_mt_dir_scan (subvol, &loc, GF_CLIENT_PID_SELF_HEALD,
- healer, afr_shd_index_heal, xdata,
- priv->shd.max_threads, priv->shd.wait_qlength);
+out:
+ loc_wipe(&loc);
- if (ret == 0)
- ret = healer->crawl_event.healed_count;
+ if (xdata)
+ dict_unref(xdata);
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ return ret;
+}
+int
+afr_shd_index_sweep_all(struct subvol_healer *healer)
+{
+ int ret = 0;
+ int count = 0;
+
+ ret = afr_shd_index_sweep(healer, GF_XATTROP_INDEX_GFID);
+ if (ret < 0)
+ goto out;
+ count = ret;
+
+ ret = afr_shd_index_sweep(healer, GF_XATTROP_DIRTY_GFID);
+ if (ret < 0)
+ goto out;
+ count += ret;
+
+ ret = afr_shd_index_sweep(healer, GF_XATTROP_ENTRY_CHANGES_GFID);
+ if (ret < 0)
+ goto out;
+ count += ret;
out:
- loc_wipe (&loc);
-
- if (xdata)
- dict_unref (xdata);
- return ret;
+ if (ret < 0)
+ return ret;
+ else
+ return count;
}
int
-afr_shd_index_sweep_all (struct subvol_healer *healer)
+afr_shd_full_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+ void *data)
{
- int ret = 0;
- int count = 0;
+ struct subvol_healer *healer = data;
+ xlator_t *this = healer->this;
+ afr_private_t *priv = NULL;
- ret = afr_shd_index_sweep (healer, GF_XATTROP_INDEX_GFID);
- if (ret < 0)
- goto out;
- count = ret;
+ priv = this->private;
- ret = afr_shd_index_sweep (healer, GF_XATTROP_DIRTY_GFID);
- if (ret < 0)
- goto out;
- count += ret;
+ if (this->cleanup_starting) {
+ return -ENOTCONN;
+ }
-out:
- if (ret < 0)
- return ret;
- else
- return count;
+ if (!priv->shd.enabled)
+ return -EBUSY;
+
+ afr_shd_selfheal_name(healer, healer->subvol, parent->inode->gfid,
+ entry->d_name);
+
+ afr_shd_selfheal(healer, healer->subvol, entry->d_stat.ia_gfid);
+
+ return 0;
}
int
-afr_shd_full_heal (xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
- void *data)
+afr_shd_full_sweep(struct subvol_healer *healer, inode_t *inode)
{
- struct subvol_healer *healer = data;
- xlator_t *this = healer->this;
- afr_private_t *priv = NULL;
+ afr_private_t *priv = NULL;
+ loc_t loc = {0};
- priv = this->private;
- if (!priv->shd.enabled)
- return -EBUSY;
+ priv = healer->this->private;
+ loc.inode = inode;
+ return syncop_ftw(priv->children[healer->subvol], &loc,
+ GF_CLIENT_PID_SELF_HEALD, healer, afr_shd_full_heal);
+}
- afr_shd_selfheal_name (healer, healer->subvol,
- parent->inode->gfid, entry->d_name);
+int
+afr_shd_fill_ta_loc(xlator_t *this, loc_t *loc)
+{
+ afr_private_t *priv = NULL;
+ struct iatt stbuf = {
+ 0,
+ };
+ int ret = -1;
+
+ priv = this->private;
+ loc->parent = inode_ref(this->itable->root);
+ gf_uuid_copy(loc->pargfid, loc->parent->gfid);
+ loc->name = priv->pending_key[THIN_ARBITER_BRICK_INDEX];
+ loc->inode = inode_new(loc->parent->table);
+ GF_CHECK_ALLOC(loc->inode, ret, out);
+
+ if (!gf_uuid_is_null(priv->ta_gfid))
+ goto assign_gfid;
+
+ ret = syncop_lookup(priv->children[THIN_ARBITER_BRICK_INDEX], loc, &stbuf,
+ 0, 0, 0);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed lookup on file %s.", loc->name);
+ goto out;
+ }
+
+ gf_uuid_copy(priv->ta_gfid, stbuf.ia_gfid);
+
+assign_gfid:
+ gf_uuid_copy(loc->gfid, priv->ta_gfid);
+ ret = 0;
- afr_shd_selfheal (healer, healer->subvol, entry->d_stat.ia_gfid);
+out:
+ if (ret)
+ loc_wipe(loc);
- return 0;
+ return ret;
}
int
-afr_shd_full_sweep (struct subvol_healer *healer, inode_t *inode)
+_afr_shd_ta_get_xattrs(xlator_t *this, loc_t *loc, dict_t **xdata)
{
- afr_private_t *priv = NULL;
- loc_t loc = {0};
-
- priv = healer->this->private;
- loc.inode = inode;
- return syncop_ftw (priv->children[healer->subvol], &loc,
- GF_CLIENT_PID_SELF_HEALD, healer,
- afr_shd_full_heal);
-}
+ afr_private_t *priv = NULL;
+ dict_t *xattr = NULL;
+ int raw[AFR_NUM_CHANGE_LOGS] = {
+ 0,
+ };
+ int ret = -1;
+ int i = 0;
+
+ priv = this->private;
+
+ xattr = dict_new();
+ if (!xattr) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_DICT_GET_FAILED,
+ "Failed to create dict.");
+ goto out;
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ ret = dict_set_static_bin(xattr, priv->pending_key[i], &raw,
+ AFR_NUM_CHANGE_LOGS * sizeof(int));
+ if (ret)
+ goto out;
+ }
+ ret = syncop_xattrop(priv->children[THIN_ARBITER_BRICK_INDEX], loc,
+ GF_XATTROP_ADD_ARRAY, xattr, NULL, xdata, NULL);
+ if (ret || !(*xdata)) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Xattrop failed on %s.", loc->name);
+ }
-void *
-afr_shd_index_healer (void *data)
-{
- struct subvol_healer *healer = NULL;
- xlator_t *this = NULL;
- int ret = 0;
- afr_private_t *priv = NULL;
-
- healer = data;
- THIS = this = healer->this;
- priv = this->private;
-
- for (;;) {
- afr_shd_healer_wait (healer);
-
- ASSERT_LOCAL(this, healer);
- priv->local[healer->subvol] = healer->local;
-
- do {
- gf_msg_debug (this->name, 0,
- "starting index sweep on subvol %s",
- afr_subvol_name (this, healer->subvol));
-
- afr_shd_sweep_prepare (healer);
-
- ret = afr_shd_index_sweep_all (healer);
-
- afr_shd_sweep_done (healer);
- /*
- As long as at least one gfid was
- healed, keep retrying. We may have
- just healed a directory and thereby
- created entries for other gfids which
- could not be healed thus far.
- */
-
- gf_msg_debug (this->name, 0,
- "finished index sweep on subvol %s",
- afr_subvol_name (this, healer->subvol));
- /*
- Give a pause before retrying to avoid a busy loop
- in case the only entry in index is because of
- an ongoing I/O.
- */
- sleep (1);
- } while (ret > 0);
- }
-
- return NULL;
-}
+out:
+ if (xattr)
+ dict_unref(xattr);
+ return ret;
+}
-void *
-afr_shd_full_healer (void *data)
+void
+afr_shd_ta_get_xattrs(xlator_t *this, loc_t *loc, struct subvol_healer *healer,
+ dict_t **xdata)
{
- struct subvol_healer *healer = NULL;
- xlator_t *this = NULL;
- int run = 0;
+ int ret = 0;
+
+ loc_wipe(loc);
+ if (afr_shd_fill_ta_loc(this, loc)) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to populate thin-arbiter loc for: %s.", loc->name);
+ ret = -1;
+ goto out;
+ }
+
+ ret = afr_ta_post_op_lock(this, loc);
+ if (ret)
+ goto out;
+
+ ret = _afr_shd_ta_get_xattrs(this, loc, xdata);
+ if (ret) {
+ if (*xdata) {
+ dict_unref(*xdata);
+ *xdata = NULL;
+ }
+ }
- healer = data;
- THIS = this = healer->this;
+ afr_ta_post_op_unlock(this, loc);
+
+out:
+ if (ret)
+ healer->rerun = 1;
+}
- for (;;) {
- pthread_mutex_lock (&healer->mutex);
- {
- run = __afr_shd_healer_wait (healer);
- if (!run)
- healer->running = _gf_false;
- }
- pthread_mutex_unlock (&healer->mutex);
+int
+afr_shd_ta_unset_xattrs(xlator_t *this, loc_t *loc, dict_t **xdata, int healer)
+{
+ afr_private_t *priv = NULL;
+ dict_t *xattr = NULL;
+ gf_boolean_t need_xattrop = _gf_false;
+ void *pending_raw = NULL;
+ int *raw = NULL;
+ int pending[AFR_NUM_CHANGE_LOGS] = {
+ 0,
+ };
+ int i = 0;
+ int j = 0;
+ int val = 0;
+ int ret = -1;
+
+ priv = this->private;
+
+ xattr = dict_new();
+ if (!xattr) {
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ raw = GF_CALLOC(AFR_NUM_CHANGE_LOGS, sizeof(int), gf_afr_mt_int32_t);
+ if (!raw) {
+ goto out;
+ }
- if (!run)
- break;
+ ret = dict_get_ptr(*xdata, priv->pending_key[i], &pending_raw);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_GET_FAILED,
+ "Error getting value "
+ "of pending key %s",
+ priv->pending_key[i]);
+ GF_FREE(raw);
+ goto out;
+ }
- ASSERT_LOCAL(this, healer);
+ memcpy(pending, pending_raw, sizeof(pending));
+ for (j = 0; j < AFR_NUM_CHANGE_LOGS; j++) {
+ val = ntoh32(pending[j]);
+ if (val) {
+ if (i == healer) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_THIN_ARB,
+ "I am "
+ "not the good shd. Skipping. "
+ "SHD = %d.",
+ healer);
+ ret = 0;
+ GF_FREE(raw);
+ goto out;
+ }
+ need_xattrop = _gf_true;
+ raw[j] = hton32(-val);
+ }
+ }
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
- "starting full sweep on subvol %s",
- afr_subvol_name (this, healer->subvol));
+ ret = dict_set_bin(xattr, priv->pending_key[i], raw,
+ AFR_NUM_CHANGE_LOGS * sizeof(int));
+ if (ret) {
+ GF_FREE(raw);
+ goto out;
+ }
- afr_shd_sweep_prepare (healer);
+ if (need_xattrop)
+ break;
+ }
- afr_shd_full_sweep (healer, this->itable->root);
+ if (!need_xattrop) {
+ ret = 0;
+ goto out;
+ }
- afr_shd_sweep_done (healer);
+ ret = syncop_xattrop(priv->children[THIN_ARBITER_BRICK_INDEX], loc,
+ GF_XATTROP_ADD_ARRAY, xattr, NULL, NULL, NULL);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Xattrop failed.");
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
- "finished full sweep on subvol %s",
- afr_subvol_name (this, healer->subvol));
- }
+out:
+ if (xattr)
+ dict_unref(xattr);
- return NULL;
+ return ret;
}
-
-int
-afr_shd_healer_init (xlator_t *this, struct subvol_healer *healer)
+void
+afr_shd_ta_check_and_unset_xattrs(xlator_t *this, loc_t *loc,
+ struct subvol_healer *healer,
+ dict_t *pre_crawl_xdata)
{
- int ret = 0;
+ int ret_lock = 0;
+ int ret = 0;
+ dict_t *post_crawl_xdata = NULL;
- ret = pthread_mutex_init (&healer->mutex, NULL);
- if (ret)
- goto out;
+ ret_lock = afr_ta_post_op_lock(this, loc);
+ if (ret_lock)
+ goto unref;
- ret = pthread_cond_init (&healer->cond, NULL);
- if (ret)
- goto out;
+ ret = _afr_shd_ta_get_xattrs(this, loc, &post_crawl_xdata);
+ if (ret)
+ goto unref;
- healer->this = this;
- healer->running = _gf_false;
- healer->rerun = _gf_false;
- healer->local = _gf_false;
-out:
- return ret;
-}
+ if (!are_dicts_equal(pre_crawl_xdata, post_crawl_xdata, NULL, NULL)) {
+ ret = -1;
+ goto unref;
+ }
+ ret = afr_shd_ta_unset_xattrs(this, loc, &post_crawl_xdata, healer->subvol);
-int
-afr_shd_healer_spawn (xlator_t *this, struct subvol_healer *healer,
- void *(threadfn)(void *))
-{
- int ret = 0;
-
- pthread_mutex_lock (&healer->mutex);
- {
- if (healer->running) {
- pthread_cond_signal (&healer->cond);
- } else {
- ret = gf_thread_create (&healer->thread, NULL,
- threadfn, healer);
- if (ret)
- goto unlock;
- healer->running = 1;
- }
-
- healer->rerun = 1;
- }
-unlock:
- pthread_mutex_unlock (&healer->mutex);
-
- return ret;
-}
+unref:
+ if (post_crawl_xdata) {
+ dict_unref(post_crawl_xdata);
+ post_crawl_xdata = NULL;
+ }
+ if (ret || ret_lock)
+ healer->rerun = 1;
-int
-afr_shd_full_healer_spawn (xlator_t *this, int subvol)
-{
- return afr_shd_healer_spawn (this, NTH_FULL_HEALER (this, subvol),
- afr_shd_full_healer);
+ if (!ret_lock)
+ afr_ta_post_op_unlock(this, loc);
}
-
-int
-afr_shd_index_healer_spawn (xlator_t *this, int subvol)
+gf_boolean_t
+afr_bricks_available_for_heal(afr_private_t *priv)
{
- return afr_shd_healer_spawn (this, NTH_INDEX_HEALER (this, subvol),
- afr_shd_index_healer);
-}
+ int up_children = 0;
+ up_children = __afr_get_up_children_count(priv);
+ if (up_children < 2) {
+ return _gf_false;
+ }
+ return _gf_true;
+}
-int
-afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output,
- crawl_event_t *crawl_event)
+static gf_boolean_t
+afr_shd_ta_needs_heal(xlator_t *this, struct subvol_healer *healer)
{
- int ret = 0;
- uint64_t count = 0;
- char key[256] = {0};
- int xl_id = 0;
- uint64_t healed_count = 0;
- uint64_t split_brain_count = 0;
- uint64_t heal_failed_count = 0;
- char *start_time_str = 0;
- char *end_time_str = NULL;
- char *crawl_type = NULL;
- int progress = -1;
- int child = -1;
-
- child = crawl_event->child;
- healed_count = crawl_event->healed_count;
- split_brain_count = crawl_event->split_brain_count;
- heal_failed_count = crawl_event->heal_failed_count;
- crawl_type = crawl_event->crawl_type;
-
- if (!crawl_event->start_time)
- goto out;
-
- start_time_str = gf_strdup (ctime (&crawl_event->start_time));
-
- if (crawl_event->end_time)
- end_time_str = gf_strdup (ctime (&crawl_event->end_time));
-
- ret = dict_get_int32 (output, this->name, &xl_id);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, -ret,
- AFR_MSG_DICT_GET_FAILED, "xl does not have id");
- goto out;
+ dict_t *xdata = NULL;
+ afr_private_t *priv = NULL;
+ loc_t loc = {
+ 0,
+ };
+ int ret = -1;
+ int i = 0;
+ gf_boolean_t need_heal = _gf_false;
+
+ priv = this->private;
+
+ ret = afr_shd_fill_ta_loc(this, &loc);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to populate thin-arbiter loc for: %s.", loc.name);
+ healer->rerun = 1;
+ goto out;
+ }
+
+ if (_afr_shd_ta_get_xattrs(this, &loc, &xdata)) {
+ healer->rerun = 1;
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (afr_ta_dict_contains_pending_xattr(xdata, priv, i)) {
+ need_heal = _gf_true;
+ break;
}
+ }
- snprintf (key, sizeof (key), "statistics-%d-%d-count", xl_id, child);
- ret = dict_get_uint64 (output, key, &count);
+out:
+ if (xdata)
+ dict_unref(xdata);
+ loc_wipe(&loc);
+ return need_heal;
+}
- snprintf (key, sizeof (key), "statistics_healed_cnt-%d-%d-%"PRIu64,
- xl_id, child, count);
- ret = dict_set_uint64(output, key, healed_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not add statistics_healed_count to outout");
+static int
+afr_shd_anon_inode_cleaner(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+ void *data)
+{
+ struct subvol_healer *healer = data;
+ afr_private_t *priv = healer->this->private;
+ call_frame_t *frame = NULL;
+ afr_local_t *local = NULL;
+ int ret = 0;
+ loc_t loc = {0};
+ int count = 0;
+ int i = 0;
+ int op_errno = 0;
+ struct iatt *iatt = NULL;
+ gf_boolean_t multiple_links = _gf_false;
+ unsigned char *gfid_present = alloca0(priv->child_count);
+ unsigned char *entry_present = alloca0(priv->child_count);
+ char *type = "file";
+
+ frame = afr_frame_create(healer->this, &ret);
+ if (!frame) {
+ ret = -ret;
+ goto out;
+ }
+ local = frame->local;
+ if (AFR_COUNT(local->child_up, priv->child_count) != priv->child_count) {
+ gf_msg_debug(healer->this->name, 0,
+ "Not all bricks are up. Skipping "
+ "cleanup of %s on %s",
+ entry->d_name, subvol->name);
+ ret = 0;
+ goto out;
+ }
+
+ loc.inode = inode_new(parent->inode->table);
+ if (!loc.inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = gf_uuid_parse(entry->d_name, loc.gfid);
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+ AFR_ONLIST(local->child_up, frame, afr_selfheal_discover_cbk, lookup, &loc,
+ NULL);
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].op_ret == 0) {
+ count++;
+ gfid_present[i] = 1;
+ iatt = &local->replies[i].poststat;
+ if (iatt->ia_type == IA_IFDIR) {
+ type = "dir";
+ }
+
+ if (i == healer->subvol) {
+ if (local->replies[i].poststat.ia_nlink > 1) {
+ multiple_links = _gf_true;
+ }
+ }
+ } else if (local->replies[i].op_errno != ENOENT &&
+ local->replies[i].op_errno != ESTALE) {
+ /*We don't have complete view. Skip the entry*/
+ gf_msg_debug(healer->this->name, local->replies[i].op_errno,
+ "Skipping cleanup of %s on %s", entry->d_name,
+ subvol->name);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ /*Inode is deleted from subvol*/
+ if (count == 1 || (iatt->ia_type != IA_IFDIR && multiple_links)) {
+ gf_msg(healer->this->name, GF_LOG_WARNING, 0,
+ AFR_MSG_EXPUNGING_FILE_OR_DIR, "expunging %s %s/%s on %s", type,
+ priv->anon_inode_name, entry->d_name, subvol->name);
+ ret = afr_shd_entry_purge(subvol, parent->inode, entry->d_name,
+ iatt->ia_type);
+ if (ret == -ENOENT || ret == -ESTALE)
+ ret = 0;
+ } else if (count > 1) {
+ loc_wipe(&loc);
+ loc.parent = inode_ref(parent->inode);
+ loc.name = entry->d_name;
+ loc.inode = inode_new(parent->inode->table);
+ if (!loc.inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ AFR_ONLIST(local->child_up, frame, afr_selfheal_discover_cbk, lookup,
+ &loc, NULL);
+ count = 0;
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->replies[i].op_ret == 0) {
+ count++;
+ entry_present[i] = 1;
+ iatt = &local->replies[i].poststat;
+ } else if (local->replies[i].op_errno != ENOENT &&
+ local->replies[i].op_errno != ESTALE) {
+ /*We don't have complete view. Skip the entry*/
+ gf_msg_debug(healer->this->name, local->replies[i].op_errno,
+ "Skipping cleanup of %s on %s", entry->d_name,
+ subvol->name);
+ ret = 0;
goto out;
- }
-
- snprintf (key, sizeof (key), "statistics_sb_cnt-%d-%d-%"PRIu64,
- xl_id, child, count);
- ret = dict_set_uint64 (output, key, split_brain_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not add statistics_split_brain_count to outout");
+ }
+ }
+ for (i = 0; i < priv->child_count; i++) {
+ if (gfid_present[i] && !entry_present[i]) {
+ /*Entry is not anonymous on at least one subvol*/
+ gf_msg_debug(healer->this->name, 0,
+ "Valid entry present on %s "
+ "Skipping cleanup of %s on %s",
+ priv->children[i]->name, entry->d_name,
+ subvol->name);
+ ret = 0;
goto out;
+ }
}
- snprintf (key, sizeof (key), "statistics_crawl_type-%d-%d-%"PRIu64,
- xl_id, child, count);
- ret = dict_set_str (output, key, crawl_type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not add statistics_crawl_type to output");
- goto out;
+ gf_msg(healer->this->name, GF_LOG_WARNING, 0,
+ AFR_MSG_EXPUNGING_FILE_OR_DIR,
+ "expunging %s %s/%s on all subvols", type, priv->anon_inode_name,
+ entry->d_name);
+ ret = 0;
+ for (i = 0; i < priv->child_count; i++) {
+ op_errno = -afr_shd_entry_purge(priv->children[i], loc.parent,
+ entry->d_name, iatt->ia_type);
+ if (op_errno != ENOENT && op_errno != ESTALE) {
+ ret |= -op_errno;
+ }
}
+ }
- snprintf (key, sizeof (key), "statistics_heal_failed_cnt-%d-%d-%"PRIu64,
- xl_id, child, count);
- ret = dict_set_uint64 (output, key, heal_failed_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not add statistics_healed_failed_count to outout");
- goto out;
+out:
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ loc_wipe(&loc);
+ return ret;
+}
+
+static void
+afr_cleanup_anon_inode_dir(struct subvol_healer *healer)
+{
+ int ret = 0;
+ call_frame_t *frame = NULL;
+ afr_private_t *priv = healer->this->private;
+ loc_t loc = {0};
+
+ ret = afr_anon_inode_create(healer->this, healer->subvol, &loc.inode);
+ if (ret)
+ goto out;
+
+ frame = afr_frame_create(healer->this, &ret);
+ if (!frame) {
+ ret = -ret;
+ goto out;
+ }
+
+ ret = syncop_mt_dir_scan(frame, priv->children[healer->subvol], &loc,
+ GF_CLIENT_PID_SELF_HEALD, healer,
+ afr_shd_anon_inode_cleaner, NULL,
+ priv->shd.max_threads, priv->shd.wait_qlength);
+out:
+ if (frame)
+ AFR_STACK_DESTROY(frame);
+ loc_wipe(&loc);
+ return;
+}
+
+void *
+afr_shd_index_healer(void *data)
+{
+ struct subvol_healer *healer = NULL;
+ xlator_t *this = NULL;
+ int ret = 0;
+ afr_private_t *priv = NULL;
+ dict_t *pre_crawl_xdata = NULL;
+ loc_t loc = {
+ 0,
+ };
+
+ healer = data;
+ THIS = this = healer->this;
+ priv = this->private;
+
+ for (;;) {
+ afr_shd_healer_wait(healer);
+
+ if (!afr_bricks_available_for_heal(priv))
+ continue;
+
+ ASSERT_LOCAL(this, healer);
+ priv->local[healer->subvol] = healer->local;
+
+ if (priv->thin_arbiter_count) {
+ if (afr_shd_ta_needs_heal(this, healer))
+ afr_shd_ta_get_xattrs(this, &loc, healer, &pre_crawl_xdata);
}
- snprintf (key, sizeof (key), "statistics_strt_time-%d-%d-%"PRIu64,
- xl_id, child, count);
- ret = dict_set_dynstr (output, key, start_time_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not add statistics_crawl_start_time to outout");
- goto out;
- } else {
- start_time_str = NULL;
- }
-
- if (!end_time_str)
- progress = 1;
- else
- progress = 0;
-
- snprintf (key, sizeof (key), "statistics_end_time-%d-%d-%"PRIu64,
- xl_id, child, count);
- if (!end_time_str)
- end_time_str = gf_strdup ("Could not determine the end time");
- ret = dict_set_dynstr (output, key, end_time_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not add statistics_crawl_end_time to outout");
- goto out;
- } else {
- end_time_str = NULL;
- }
+ do {
+ gf_msg_debug(this->name, 0, "starting index sweep on subvol %s",
+ afr_subvol_name(this, healer->subvol));
+
+ afr_shd_sweep_prepare(healer);
+
+ ret = afr_shd_index_sweep_all(healer);
+
+ afr_shd_sweep_done(healer);
+ /*
+ As long as at least one gfid was
+ healed, keep retrying. We may have
+ just healed a directory and thereby
+ created entries for other gfids which
+ could not be healed thus far.
+ */
+
+ gf_msg_debug(this->name, 0, "finished index sweep on subvol %s",
+ afr_subvol_name(this, healer->subvol));
+ /*
+ Give a pause before retrying to avoid a busy loop
+ in case the only entry in index is because of
+ an ongoing I/O.
+ */
+ sleep(1);
+ } while (ret > 0);
+
+ if (ret == 0) {
+ afr_cleanup_anon_inode_dir(healer);
+ }
- snprintf (key, sizeof (key), "statistics_inprogress-%d-%d-%"PRIu64,
- xl_id, child, count);
+ if (ret == 0 && pre_crawl_xdata &&
+ !healer->crawl_event.heal_failed_count) {
+ afr_shd_ta_check_and_unset_xattrs(this, &loc, healer,
+ pre_crawl_xdata);
+ }
- ret = dict_set_int32 (output, key, progress);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not add statistics_inprogress to outout");
- goto out;
+ if (pre_crawl_xdata) {
+ dict_unref(pre_crawl_xdata);
+ pre_crawl_xdata = NULL;
}
+ }
- snprintf (key, sizeof (key), "statistics-%d-%d-count", xl_id, child);
- ret = dict_set_uint64 (output, key, count + 1);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not increment the counter.");
- goto out;
- }
-out:
- GF_FREE (start_time_str);
- GF_FREE (end_time_str);
- return ret;
+ return NULL;
}
-
-int
-afr_shd_dict_add_path (xlator_t *this, dict_t *output, int child, char *path,
- struct timeval *tv)
+void *
+afr_shd_full_healer(void *data)
{
- int ret = -1;
- uint64_t count = 0;
- char key[256] = {0};
- int xl_id = 0;
+ struct subvol_healer *healer = NULL;
+ xlator_t *this = NULL;
+ int run = 0;
- ret = dict_get_int32 (output, this->name, &xl_id);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, -ret,
- AFR_MSG_DICT_GET_FAILED, "xl does not have id");
- goto out;
+ healer = data;
+ THIS = this = healer->this;
+
+ for (;;) {
+ pthread_mutex_lock(&healer->mutex);
+ {
+ run = __afr_shd_healer_wait(healer);
+ if (!run)
+ healer->running = _gf_false;
}
+ pthread_mutex_unlock(&healer->mutex);
- snprintf (key, sizeof (key), "%d-%d-count", xl_id, child);
- ret = dict_get_uint64 (output, key, &count);
+ if (!run)
+ break;
- snprintf (key, sizeof (key), "%d-%d-%"PRIu64, xl_id, child, count);
- ret = dict_set_dynstr (output, key, path);
+ ASSERT_LOCAL(this, healer);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, -ret,
- AFR_MSG_DICT_SET_FAILED, "%s: Could not add to output",
- path);
- goto out;
- }
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
+ "starting full sweep on subvol %s",
+ afr_subvol_name(this, healer->subvol));
- if (tv) {
- snprintf (key, sizeof (key), "%d-%d-%"PRIu64"-time", xl_id,
- child, count);
- ret = dict_set_uint32 (output, key, tv->tv_sec);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "%s: Could not set time",
- path);
- goto out;
- }
- }
-
- snprintf (key, sizeof (key), "%d-%d-count", xl_id, child);
-
- ret = dict_set_uint64 (output, key, count + 1);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_DICT_SET_FAILED,
- "Could not increment count");
- goto out;
- }
+ afr_shd_sweep_prepare(healer);
- ret = 0;
-out:
- return ret;
+ afr_shd_full_sweep(healer, this->itable->root);
+
+ afr_shd_sweep_done(healer);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
+ "finished full sweep on subvol %s",
+ afr_subvol_name(this, healer->subvol));
+ }
+
+ return NULL;
}
int
-afr_add_shd_event (circular_buffer_t *cb, void *data)
+afr_shd_healer_init(xlator_t *this, struct subvol_healer *healer)
{
- dict_t *output = NULL;
- xlator_t *this = THIS;
- afr_private_t *priv = NULL;
- afr_self_heald_t *shd = NULL;
- shd_event_t *shd_event = NULL;
- char *path = NULL;
-
- output = data;
- priv = this->private;
- shd = &priv->shd;
- shd_event = cb->data;
-
- if (!shd->index_healers[shd_event->child].local)
- return 0;
-
- path = gf_strdup (shd_event->path);
- if (!path)
- return -ENOMEM;
-
- afr_shd_dict_add_path (this, output, shd_event->child, path,
- &cb->tv);
- return 0;
+ int ret = 0;
+
+ ret = pthread_mutex_init(&healer->mutex, NULL);
+ if (ret)
+ goto out;
+
+ ret = pthread_cond_init(&healer->cond, NULL);
+ if (ret)
+ goto out;
+
+ healer->this = this;
+ healer->running = _gf_false;
+ healer->rerun = _gf_false;
+ healer->local = _gf_false;
+out:
+ return ret;
}
int
-afr_add_crawl_event (circular_buffer_t *cb, void *data)
+afr_shd_healer_spawn(xlator_t *this, struct subvol_healer *healer,
+ void *(threadfn)(void *))
{
- dict_t *output = NULL;
- xlator_t *this = THIS;
- afr_private_t *priv = NULL;
- afr_self_heald_t *shd = NULL;
- crawl_event_t *crawl_event = NULL;
+ int ret = 0;
- output = data;
- priv = this->private;
- shd = &priv->shd;
- crawl_event = cb->data;
+ pthread_mutex_lock(&healer->mutex);
+ {
+ if (healer->running) {
+ pthread_cond_signal(&healer->cond);
+ } else {
+ ret = gf_thread_create(&healer->thread, NULL, threadfn, healer,
+ "shdheal");
+ if (ret)
+ goto unlock;
+ healer->running = 1;
+ }
- if (!shd->index_healers[crawl_event->child].local)
- return 0;
+ healer->rerun = 1;
+ }
+unlock:
+ pthread_mutex_unlock(&healer->mutex);
- afr_shd_dict_add_crawl_event (this, output, crawl_event);
+ return ret;
+}
- return 0;
+int
+afr_shd_full_healer_spawn(xlator_t *this, int subvol)
+{
+ return afr_shd_healer_spawn(this, NTH_FULL_HEALER(this, subvol),
+ afr_shd_full_healer);
}
+int
+afr_shd_index_healer_spawn(xlator_t *this, int subvol)
+{
+ return afr_shd_healer_spawn(this, NTH_INDEX_HEALER(this, subvol),
+ afr_shd_index_healer);
+}
int
-afr_selfheal_daemon_init (xlator_t *this)
+afr_shd_dict_add_crawl_event(xlator_t *this, dict_t *output,
+ crawl_event_t *crawl_event)
{
- afr_private_t *priv = NULL;
- afr_self_heald_t *shd = NULL;
- int ret = -1;
- int i = 0;
-
- priv = this->private;
- shd = &priv->shd;
-
- this->itable = inode_table_new (SHD_INODE_LRU_LIMIT, this);
- if (!this->itable)
- goto out;
-
- shd->index_healers = GF_CALLOC (sizeof(*shd->index_healers),
- priv->child_count,
- gf_afr_mt_subvol_healer_t);
- if (!shd->index_healers)
- goto out;
-
- for (i = 0; i < priv->child_count; i++) {
- shd->index_healers[i].subvol = i;
- ret = afr_shd_healer_init (this, &shd->index_healers[i]);
- if (ret)
- goto out;
- }
-
- shd->full_healers = GF_CALLOC (sizeof(*shd->full_healers),
- priv->child_count,
- gf_afr_mt_subvol_healer_t);
- if (!shd->full_healers)
- goto out;
- for (i = 0; i < priv->child_count; i++) {
- shd->full_healers[i].subvol = i;
- ret = afr_shd_healer_init (this, &shd->full_healers[i]);
- if (ret)
- goto out;
- }
-
- shd->split_brain = eh_new (AFR_EH_SPLIT_BRAIN_LIMIT, _gf_false,
- afr_destroy_shd_event_data);
- if (!shd->split_brain)
- goto out;
-
- shd->statistics = GF_CALLOC (sizeof(eh_t *), priv->child_count,
- gf_common_mt_eh_t);
- if (!shd->statistics)
- goto out;
+ int ret = 0;
+ uint64_t count = 0;
+ char key[128] = {0};
+ int keylen = 0;
+ char suffix[64] = {0};
+ int xl_id = 0;
+ uint64_t healed_count = 0;
+ uint64_t split_brain_count = 0;
+ uint64_t heal_failed_count = 0;
+ char *start_time_str = 0;
+ char *end_time_str = NULL;
+ char *crawl_type = NULL;
+ int progress = -1;
+ int child = -1;
+
+ child = crawl_event->child;
+ healed_count = crawl_event->healed_count;
+ split_brain_count = crawl_event->split_brain_count;
+ heal_failed_count = crawl_event->heal_failed_count;
+ crawl_type = crawl_event->crawl_type;
+
+ if (!crawl_event->start_time)
+ goto out;
+
+ start_time_str = gf_strdup(ctime(&crawl_event->start_time));
+
+ if (crawl_event->end_time)
+ end_time_str = gf_strdup(ctime(&crawl_event->end_time));
+
+ ret = dict_get_int32(output, this->name, &xl_id);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_GET_FAILED,
+ "xl does not have id");
+ goto out;
+ }
+
+ snprintf(key, sizeof(key), "statistics-%d-%d-count", xl_id, child);
+ ret = dict_get_uint64(output, key, &count);
+
+ snprintf(suffix, sizeof(suffix), "%d-%d-%" PRIu64, xl_id, child, count);
+ snprintf(key, sizeof(key), "statistics_healed_cnt-%s", suffix);
+ ret = dict_set_uint64(output, key, healed_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not add statistics_healed_count to output");
+ goto out;
+ }
+
+ snprintf(key, sizeof(key), "statistics_sb_cnt-%s", suffix);
+ ret = dict_set_uint64(output, key, split_brain_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not add statistics_split_brain_count to output");
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "statistics_crawl_type-%s", suffix);
+ ret = dict_set_strn(output, key, keylen, crawl_type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not add statistics_crawl_type to output");
+ goto out;
+ }
+
+ snprintf(key, sizeof(key), "statistics_heal_failed_cnt-%s", suffix);
+ ret = dict_set_uint64(output, key, heal_failed_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not add statistics_healed_failed_count to output");
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "statistics_strt_time-%s", suffix);
+ ret = dict_set_dynstrn(output, key, keylen, start_time_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not add statistics_crawl_start_time to output");
+ goto out;
+ } else {
+ start_time_str = NULL;
+ }
+
+ if (!end_time_str)
+ progress = 1;
+ else
+ progress = 0;
+
+ keylen = snprintf(key, sizeof(key), "statistics_end_time-%s", suffix);
+ if (!end_time_str)
+ end_time_str = gf_strdup("Could not determine the end time");
+ ret = dict_set_dynstrn(output, key, keylen, end_time_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not add statistics_crawl_end_time to output");
+ goto out;
+ } else {
+ end_time_str = NULL;
+ }
+
+ keylen = snprintf(key, sizeof(key), "statistics_inprogress-%s", suffix);
+
+ ret = dict_set_int32n(output, key, keylen, progress);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not add statistics_inprogress to output");
+ goto out;
+ }
+
+ snprintf(key, sizeof(key), "statistics-%d-%d-count", xl_id, child);
+ ret = dict_set_uint64(output, key, count + 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not increment the counter.");
+ goto out;
+ }
+out:
+ GF_FREE(start_time_str);
+ GF_FREE(end_time_str);
+ return ret;
+}
- for (i = 0; i < priv->child_count ; i++) {
- shd->statistics[i] = eh_new (AFR_STATISTICS_HISTORY_SIZE,
- _gf_false,
- afr_destroy_crawl_event_data);
- if (!shd->statistics[i])
- goto out;
- shd->full_healers[i].crawl_event.child = i;
- shd->full_healers[i].crawl_event.crawl_type = "FULL";
- shd->index_healers[i].crawl_event.child = i;
- shd->index_healers[i].crawl_event.crawl_type = "INDEX";
+int
+afr_shd_dict_add_path(xlator_t *this, dict_t *output, int child, char *path,
+ struct timeval *tv)
+{
+ int ret = -1;
+ uint64_t count = 0;
+ char key[64] = {0};
+ int keylen = 0;
+ char xl_id_child_str[32] = {0};
+ int xl_id = 0;
+
+ ret = dict_get_int32(output, this->name, &xl_id);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_GET_FAILED,
+ "xl does not have id");
+ goto out;
+ }
+
+ snprintf(xl_id_child_str, sizeof(xl_id_child_str), "%d-%d", xl_id, child);
+ snprintf(key, sizeof(key), "%s-count", xl_id_child_str);
+ ret = dict_get_uint64(output, key, &count);
+
+ keylen = snprintf(key, sizeof(key), "%s-%" PRIu64, xl_id_child_str, count);
+ ret = dict_set_dynstrn(output, key, keylen, path);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Could not add to output", path);
+ goto out;
+ }
+
+ if (tv) {
+ snprintf(key, sizeof(key), "%s-%" PRIu64 "-time", xl_id_child_str,
+ count);
+ ret = dict_set_uint32(output, key, tv->tv_sec);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "%s: Could not set time", path);
+ goto out;
}
+ }
+
+ snprintf(key, sizeof(key), "%s-count", xl_id_child_str);
- ret = 0;
+ ret = dict_set_uint64(output, key, count + 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "Could not increment count");
+ goto out;
+ }
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
+int
+afr_add_shd_event(circular_buffer_t *cb, void *data)
+{
+ dict_t *output = NULL;
+ xlator_t *this = THIS;
+ afr_private_t *priv = NULL;
+ afr_self_heald_t *shd = NULL;
+ shd_event_t *shd_event = NULL;
+ char *path = NULL;
+
+ output = data;
+ priv = this->private;
+ shd = &priv->shd;
+ shd_event = cb->data;
+
+ if (!shd->index_healers[shd_event->child].local)
+ return 0;
+
+ path = gf_strdup(shd_event->path);
+ if (!path)
+ return -ENOMEM;
+
+ afr_shd_dict_add_path(this, output, shd_event->child, path, &cb->tv);
+ return 0;
+}
int
-afr_selfheal_childup (xlator_t *this, int subvol)
+afr_add_crawl_event(circular_buffer_t *cb, void *data)
{
- afr_shd_index_healer_spawn (this, subvol);
+ dict_t *output = NULL;
+ xlator_t *this = THIS;
+ afr_private_t *priv = NULL;
+ afr_self_heald_t *shd = NULL;
+ crawl_event_t *crawl_event = NULL;
+
+ output = data;
+ priv = this->private;
+ shd = &priv->shd;
+ crawl_event = cb->data;
+
+ if (!shd->index_healers[crawl_event->child].local)
+ return 0;
+
+ afr_shd_dict_add_crawl_event(this, output, crawl_event);
- return 0;
+ return 0;
+}
+
+int
+afr_selfheal_daemon_init(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_self_heald_t *shd = NULL;
+ int ret = -1;
+ int i = 0;
+
+ priv = this->private;
+ shd = &priv->shd;
+
+ shd->index_healers = GF_CALLOC(sizeof(*shd->index_healers),
+ priv->child_count,
+ gf_afr_mt_subvol_healer_t);
+ if (!shd->index_healers)
+ goto out;
+
+ for (i = 0; i < priv->child_count; i++) {
+ shd->index_healers[i].subvol = i;
+ ret = afr_shd_healer_init(this, &shd->index_healers[i]);
+ if (ret)
+ goto out;
+ }
+
+ shd->full_healers = GF_CALLOC(sizeof(*shd->full_healers), priv->child_count,
+ gf_afr_mt_subvol_healer_t);
+ if (!shd->full_healers)
+ goto out;
+ for (i = 0; i < priv->child_count; i++) {
+ shd->full_healers[i].subvol = i;
+ ret = afr_shd_healer_init(this, &shd->full_healers[i]);
+ if (ret)
+ goto out;
+ }
+
+ shd->split_brain = eh_new(AFR_EH_SPLIT_BRAIN_LIMIT, _gf_false,
+ afr_destroy_shd_event_data);
+ if (!shd->split_brain)
+ goto out;
+
+ shd->statistics = GF_CALLOC(sizeof(eh_t *), priv->child_count,
+ gf_common_mt_eh_t);
+ if (!shd->statistics)
+ goto out;
+
+ for (i = 0; i < priv->child_count; i++) {
+ shd->statistics[i] = eh_new(AFR_STATISTICS_HISTORY_SIZE, _gf_false,
+ afr_destroy_crawl_event_data);
+ if (!shd->statistics[i])
+ goto out;
+ shd->full_healers[i].crawl_event.child = i;
+ shd->full_healers[i].crawl_event.crawl_type = "FULL";
+ shd->index_healers[i].crawl_event.child = i;
+ shd->index_healers[i].crawl_event.crawl_type = "INDEX";
+ }
+
+ ret = 0;
+out:
+ return ret;
}
+void
+afr_selfheal_childup(xlator_t *this, afr_private_t *priv)
+{
+ int subvol = 0;
+
+ if (!priv->shd.iamshd)
+ return;
+ for (subvol = 0; subvol < priv->child_count; subvol++)
+ if (priv->child_up[subvol])
+ afr_shd_index_healer_spawn(this, subvol);
+
+ return;
+}
int
-afr_shd_get_index_count (xlator_t *this, int i, uint64_t *count)
+afr_shd_get_index_count(xlator_t *this, int i, uint64_t *count)
{
- afr_private_t *priv = NULL;
- xlator_t *subvol = NULL;
- loc_t rootloc = {0, };
- dict_t *xattr = NULL;
- int ret = -1;
+ afr_private_t *priv = NULL;
+ xlator_t *subvol = NULL;
+ loc_t rootloc = {
+ 0,
+ };
+ dict_t *xattr = NULL;
+ int ret = -1;
- priv = this->private;
- subvol = priv->children[i];
+ priv = this->private;
+ subvol = priv->children[i];
- rootloc.inode = inode_ref (this->itable->root);
- gf_uuid_copy (rootloc.gfid, rootloc.inode->gfid);
+ rootloc.inode = inode_ref(this->itable->root);
+ gf_uuid_copy(rootloc.gfid, rootloc.inode->gfid);
- ret = syncop_getxattr (subvol, &rootloc, &xattr,
- GF_XATTROP_INDEX_COUNT, NULL, NULL);
- if (ret < 0)
- goto out;
+ ret = syncop_getxattr(subvol, &rootloc, &xattr, GF_XATTROP_INDEX_COUNT,
+ NULL, NULL);
+ if (ret < 0)
+ goto out;
- ret = dict_get_uint64 (xattr, GF_XATTROP_INDEX_COUNT, count);
- if (ret)
- goto out;
+ ret = dict_get_uint64(xattr, GF_XATTROP_INDEX_COUNT, count);
+ if (ret)
+ goto out;
- ret = 0;
+ ret = 0;
out:
- if (xattr)
- dict_unref (xattr);
- loc_wipe (&rootloc);
+ if (xattr)
+ dict_unref(xattr);
+ loc_wipe(&rootloc);
- return ret;
+ return ret;
}
-
int
-afr_xl_op (xlator_t *this, dict_t *input, dict_t *output)
+afr_xl_op(xlator_t *this, dict_t *input, dict_t *output)
{
- gf_xl_afr_op_t op = GF_SHD_OP_INVALID;
- int ret = 0;
- int xl_id = 0;
- afr_private_t *priv = NULL;
- afr_self_heald_t *shd = NULL;
- struct subvol_healer *healer = NULL;
- int i = 0;
- char key[64];
- int op_ret = 0;
- uint64_t cnt = 0;
-
- priv = this->private;
- shd = &priv->shd;
-
- ret = dict_get_int32 (input, "xl-op", (int32_t*)&op);
- if (ret)
- goto out;
- ret = dict_get_int32 (input, this->name, &xl_id);
- if (ret)
- goto out;
- ret = dict_set_int32 (output, this->name, xl_id);
- if (ret)
- goto out;
- switch (op) {
+ gf_xl_afr_op_t op = GF_SHD_OP_INVALID;
+ int ret = 0;
+ int xl_id = 0;
+ afr_private_t *priv = NULL;
+ afr_self_heald_t *shd = NULL;
+ struct subvol_healer *healer = NULL;
+ int i = 0;
+ char key[64];
+ int keylen = 0;
+ int this_name_len = 0;
+ int op_ret = 0;
+ uint64_t cnt = 0;
+
+#define AFR_SET_DICT_AND_LOG(name, output, key, keylen, dict_str, \
+ dict_str_len) \
+ { \
+ int ret; \
+ \
+ ret = dict_set_nstrn(output, key, keylen, dict_str, dict_str_len); \
+ if (ret) { \
+ gf_smsg(name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED, \
+ "key=%s", key, "value=%s", dict_str, NULL); \
+ } \
+ }
+
+ priv = this->private;
+ shd = &priv->shd;
+
+ ret = dict_get_int32_sizen(input, "xl-op", (int32_t *)&op);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_GET_FAILED,
+ "key=xl-op", NULL);
+ goto out;
+ }
+ this_name_len = strlen(this->name);
+ ret = dict_get_int32n(input, this->name, this_name_len, &xl_id);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_GET_FAILED,
+ "key=%s", this->name, NULL);
+ goto out;
+ }
+ ret = dict_set_int32n(output, this->name, this_name_len, xl_id);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_DICT_SET_FAILED,
+ "key=%s", this->name, NULL);
+ goto out;
+ }
+ switch (op) {
case GF_SHD_OP_HEAL_INDEX:
- op_ret = 0;
-
- for (i = 0; i < priv->child_count; i++) {
- healer = &shd->index_healers[i];
- snprintf (key, sizeof (key), "%d-%d-status", xl_id, i);
-
- if (!priv->child_up[i]) {
- ret = dict_set_str (output, key,
- "Brick is not connected");
- op_ret = -1;
- } else if (AFR_COUNT (priv->child_up,
- priv->child_count) < 2) {
- ret = dict_set_str (output, key,
- "< 2 bricks in replica are up");
- op_ret = -1;
- } else if (!afr_shd_is_subvol_local (this, healer->subvol)) {
- ret = dict_set_str (output, key,
- "Brick is remote");
- } else {
- ret = dict_set_str (output, key,
- "Started self-heal");
- afr_shd_index_healer_spawn (this, i);
- }
- }
- break;
+ op_ret = 0;
+
+ for (i = 0; i < priv->child_count; i++) {
+ healer = &shd->index_healers[i];
+ keylen = snprintf(key, sizeof(key), "%d-%d-status", xl_id, i);
+
+ if (!priv->child_up[i]) {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SBRICK_NOT_CONNECTED,
+ SLEN(SBRICK_NOT_CONNECTED));
+ op_ret = -1;
+ } else if (AFR_COUNT(priv->child_up, priv->child_count) < 2) {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SLESS_THAN2_BRICKS_in_REP,
+ SLEN(SLESS_THAN2_BRICKS_in_REP));
+ op_ret = -1;
+ } else if (!afr_shd_is_subvol_local(this, healer->subvol)) {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SBRICK_IS_REMOTE,
+ SLEN(SBRICK_IS_REMOTE));
+ } else {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SSTARTED_SELF_HEAL,
+ SLEN(SSTARTED_SELF_HEAL));
+
+ ret = afr_shd_index_healer_spawn(this, i);
+
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret,
+ AFR_MSG_HEALER_SPAWN_FAILED, NULL);
+ }
+ }
+ }
+ break;
case GF_SHD_OP_HEAL_FULL:
- op_ret = -1;
-
- for (i = 0; i < priv->child_count; i++) {
- healer = &shd->full_healers[i];
- snprintf (key, sizeof (key), "%d-%d-status", xl_id, i);
-
- if (!priv->child_up[i]) {
- ret = dict_set_str (output, key,
- "Brick is not connected");
- } else if (AFR_COUNT (priv->child_up,
- priv->child_count) < 2) {
- ret = dict_set_str (output, key,
- "< 2 bricks in replica are up");
- } else if (!afr_shd_is_subvol_local (this, healer->subvol)) {
- ret = dict_set_str (output, key,
- "Brick is remote");
- } else {
- ret = dict_set_str (output, key,
- "Started self-heal");
- afr_shd_full_healer_spawn (this, i);
- op_ret = 0;
- }
- }
- break;
- case GF_SHD_OP_INDEX_SUMMARY:
- /* this case has been handled in glfs-heal.c */
- break;
- case GF_SHD_OP_HEALED_FILES:
- case GF_SHD_OP_HEAL_FAILED_FILES:
- for (i = 0; i < priv->child_count; i++) {
- snprintf (key, sizeof (key), "%d-%d-status", xl_id, i);
- ret = dict_set_str (output, key, "Operation Not "
- "Supported");
+ op_ret = -1;
+
+ for (i = 0; i < priv->child_count; i++) {
+ healer = &shd->full_healers[i];
+ keylen = snprintf(key, sizeof(key), "%d-%d-status", xl_id, i);
+
+ if (!priv->child_up[i]) {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SBRICK_NOT_CONNECTED,
+ SLEN(SBRICK_NOT_CONNECTED));
+ } else if (AFR_COUNT(priv->child_up, priv->child_count) < 2) {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SLESS_THAN2_BRICKS_in_REP,
+ SLEN(SLESS_THAN2_BRICKS_in_REP));
+ } else if (!afr_shd_is_subvol_local(this, healer->subvol)) {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SBRICK_IS_REMOTE,
+ SLEN(SBRICK_IS_REMOTE));
+ } else {
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SSTARTED_SELF_HEAL,
+ SLEN(SSTARTED_SELF_HEAL));
+
+ ret = afr_shd_full_healer_spawn(this, i);
+
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret,
+ AFR_MSG_HEALER_SPAWN_FAILED, NULL);
+ }
+ op_ret = 0;
}
- break;
+ }
+ break;
+ case GF_SHD_OP_INDEX_SUMMARY:
+ /* this case has been handled in glfs-heal.c */
+ break;
case GF_SHD_OP_SPLIT_BRAIN_FILES:
- eh_dump (shd->split_brain, output, afr_add_shd_event);
- break;
+ eh_dump(shd->split_brain, output, afr_add_shd_event);
+ break;
case GF_SHD_OP_STATISTICS:
- for (i = 0; i < priv->child_count; i++) {
- eh_dump (shd->statistics[i], output,
- afr_add_crawl_event);
- afr_shd_dict_add_crawl_event (this, output,
- &shd->index_healers[i].crawl_event);
- afr_shd_dict_add_crawl_event (this, output,
- &shd->full_healers[i].crawl_event);
- }
- break;
+ for (i = 0; i < priv->child_count; i++) {
+ eh_dump(shd->statistics[i], output, afr_add_crawl_event);
+ ret = afr_shd_dict_add_crawl_event(
+ this, output, &shd->index_healers[i].crawl_event);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret,
+ AFR_MSG_ADD_CRAWL_EVENT_FAILED, NULL);
+ }
+
+ ret = afr_shd_dict_add_crawl_event(
+ this, output, &shd->full_healers[i].crawl_event);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret,
+ AFR_MSG_ADD_CRAWL_EVENT_FAILED, NULL);
+ }
+ }
+ break;
case GF_SHD_OP_STATISTICS_HEAL_COUNT:
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- op_ret = -1;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!priv->child_up[i]) {
- snprintf (key, sizeof (key), "%d-%d-status",
- xl_id, i);
- ret = dict_set_str (output, key,
- "Brick is not connected");
- } else {
- snprintf (key, sizeof (key), "%d-%d-hardlinks",
- xl_id, i);
- ret = afr_shd_get_index_count (this, i, &cnt);
- if (ret == 0) {
- ret = dict_set_uint64 (output, key, cnt);
- }
- op_ret = 0;
- }
- }
-
-// ret = _do_crawl_op_on_local_subvols (this, INDEX_TO_BE_HEALED,
-// STATISTICS_TO_BE_HEALED,
-// output);
- break;
+ op_ret = -1;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!priv->child_up[i]) {
+ keylen = snprintf(key, sizeof(key), "%d-%d-status", xl_id,
+ i);
+ AFR_SET_DICT_AND_LOG(this->name, output, key, keylen,
+ SBRICK_NOT_CONNECTED,
+ SLEN(SBRICK_NOT_CONNECTED));
+ } else {
+ snprintf(key, sizeof(key), "%d-%d-hardlinks", xl_id, i);
+ ret = afr_shd_get_index_count(this, i, &cnt);
+ if (ret == 0) {
+ ret = dict_set_uint64(output, key, cnt);
+ }
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret,
+ AFR_MSG_DICT_SET_FAILED, NULL);
+ }
+ op_ret = 0;
+ }
+ }
+
+ break;
default:
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_ARG, "Unknown set op %d", op);
- break;
- }
+ gf_smsg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_ARG, "op=%d",
+ op, NULL);
+ break;
+ }
out:
- dict_del (output, this->name);
- return op_ret;
+ dict_deln(output, this->name, this_name_len);
+ return op_ret;
+
+#undef AFR_SET_DICT_AND_LOG
}
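
For context, the refactored afr_shd_dict_add_crawl_event() in the hunk above now builds all of its statistics keys from a single shared `<xl-id>-<child>-<count>` suffix instead of formatting each key in full. The standalone sketch below only illustrates that key-naming scheme; the numeric values are hypothetical, and in the real code xl_id, child, and count come from the output dict and the crawl event, not from constants.

```c
/* Minimal sketch of the statistics key naming used above.
 * Values are hypothetical; the real code reads xl_id, child and
 * count from the output dict and the crawl_event_t. */
#include <stdio.h>
#include <inttypes.h>

int
main(void)
{
        char suffix[64] = {0};
        char key[128] = {0};
        int xl_id = 3;          /* hypothetical translator id */
        int child = 1;          /* hypothetical brick index */
        uint64_t count = 0;     /* crawl records already present */

        snprintf(suffix, sizeof(suffix), "%d-%d-%" PRIu64, xl_id, child, count);

        snprintf(key, sizeof(key), "statistics_healed_cnt-%s", suffix);
        printf("%s\n", key);    /* statistics_healed_cnt-3-1-0 */

        snprintf(key, sizeof(key), "statistics_sb_cnt-%s", suffix);
        printf("%s\n", key);    /* statistics_sb_cnt-3-1-0 */

        return 0;
}
```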
diff --git a/xlators/cluster/afr/src/afr-self-heald.h b/xlators/cluster/afr/src/afr-self-heald.h
index f591515669c..18db728ea7b 100644
--- a/xlators/cluster/afr/src/afr-self-heald.h
+++ b/xlators/cluster/afr/src/afr-self-heald.h
@@ -8,72 +8,68 @@
cases as published by the Free Software Foundation.
*/
-
#ifndef _AFR_SELF_HEALD_H
#define _AFR_SELF_HEALD_H
#include <pthread.h>
-
typedef struct {
- int child;
- char *path;
+ char *path;
+ int child;
} shd_event_t;
typedef struct {
- int child;
- uint64_t healed_count;
- uint64_t split_brain_count;
- uint64_t heal_failed_count;
-
- /* If start_time is 0, it means crawler is not in progress
- and stats are not valid */
- time_t start_time;
- /* If start_time is NOT 0 and end_time is 0, it means
- cralwer is in progress */
- time_t end_time;
- char *crawl_type;
+ uint64_t healed_count;
+ uint64_t split_brain_count;
+ uint64_t heal_failed_count;
+
+ /* If start_time is 0, it means crawler is not in progress
+ and stats are not valid */
+ time_t start_time;
+ /* If start_time is NOT 0 and end_time is 0, it means
+ crawler is in progress */
+ time_t end_time;
+ char *crawl_type;
+ int child;
} crawl_event_t;
struct subvol_healer {
- xlator_t *this;
- int subvol;
- gf_boolean_t local;
- gf_boolean_t running;
- gf_boolean_t rerun;
- crawl_event_t crawl_event;
- pthread_mutex_t mutex;
- pthread_cond_t cond;
- pthread_t thread;
+ xlator_t *this;
+ crawl_event_t crawl_event;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_t thread;
+ int subvol;
+ gf_boolean_t local;
+ gf_boolean_t running;
+ gf_boolean_t rerun;
};
typedef struct {
- gf_boolean_t iamshd;
- gf_boolean_t enabled;
- int timeout;
- struct subvol_healer *index_healers;
- struct subvol_healer *full_healers;
-
- eh_t *split_brain;
- eh_t **statistics;
- uint32_t max_threads;
- uint32_t wait_qlength;
+ struct subvol_healer *index_healers;
+ struct subvol_healer *full_healers;
+
+ eh_t *split_brain;
+ eh_t **statistics;
+ int timeout;
+ uint32_t max_threads;
+ uint32_t wait_qlength;
+ uint32_t halo_max_latency_msec;
+ gf_boolean_t iamshd;
+ gf_boolean_t enabled;
} afr_self_heald_t;
-
-int
-afr_selfheal_childup (xlator_t *this, int subvol);
-
int
-afr_selfheal_daemon_init (xlator_t *this);
+afr_selfheal_daemon_init(xlator_t *this);
int
-afr_xl_op (xlator_t *this, dict_t *input, dict_t *output);
+afr_xl_op(xlator_t *this, dict_t *input, dict_t *output);
int
-afr_shd_gfid_to_path (xlator_t *this, xlator_t *subvol, uuid_t gfid,
- char **path_p);
+afr_shd_gfid_to_path(xlator_t *this, xlator_t *subvol, uuid_t gfid,
+ char **path_p);
int
-afr_shd_index_purge (xlator_t *subvol, inode_t *inode, char *name);
+afr_shd_entry_purge(xlator_t *subvol, inode_t *inode, char *name,
+ ia_type_t type);
#endif /* !_AFR_SELF_HEALD_H */
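
The crawl_event_t comments in the header above encode a small state convention: start_time of 0 means no crawl has been recorded and the stats are not valid, a non-zero start_time with end_time of 0 means a crawl is in progress, and both non-zero means the last crawl finished. A minimal sketch of that convention is shown below; the trimmed struct and helper are stand-ins for illustration only, not the real crawl_event_t or any GlusterFS API.

```c
/* Sketch of the crawl-state convention documented in crawl_event_t:
 *   start_time == 0                  -> no crawl recorded, stats not valid
 *   start_time != 0 && end_time == 0 -> a crawl is currently in progress
 *   start_time != 0 && end_time != 0 -> the last crawl has finished
 * The struct here is a trimmed stand-in, not the real type. */
#include <stdio.h>
#include <time.h>

struct crawl_times {
        time_t start_time;
        time_t end_time;
};

static const char *
crawl_state(const struct crawl_times *e)
{
        if (e->start_time == 0)
                return "no crawl recorded";
        if (e->end_time == 0)
                return "crawl in progress";
        return "crawl finished";
}

int
main(void)
{
        struct crawl_times e = {0, 0};

        printf("%s\n", crawl_state(&e)); /* no crawl recorded */
        e.start_time = time(NULL);
        printf("%s\n", crawl_state(&e)); /* crawl in progress */
        e.end_time = time(NULL);
        printf("%s\n", crawl_state(&e)); /* crawl finished */
        return 0;
}
```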
diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
index 92f68b91113..a51f79b1f43 100644
--- a/xlators/cluster/afr/src/afr-transaction.c
+++ b/xlators/cluster/afr/src/afr-transaction.c
@@ -8,10 +8,10 @@
cases as published by the Free Software Foundation.
*/
-#include "dict.h"
-#include "byte-order.h"
-#include "common-utils.h"
-#include "timer.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/timer.h>
#include "afr.h"
#include "afr-transaction.h"
@@ -20,1991 +20,2908 @@
#include <signal.h>
+typedef enum {
+ AFR_TRANSACTION_PRE_OP,
+ AFR_TRANSACTION_POST_OP,
+} afr_xattrop_type_t;
+
+static void
+afr_lock_resume_shared(struct list_head *list);
+
+static void
+afr_post_op_handle_success(call_frame_t *frame, xlator_t *this);
+
+static void
+afr_post_op_handle_failure(call_frame_t *frame, xlator_t *this, int op_errno);
+
+void
+__afr_transaction_wake_shared(afr_local_t *local, struct list_head *shared);
+
+void
+afr_changelog_post_op_do(call_frame_t *frame, xlator_t *this);
+
+int
+afr_changelog_post_op_safe(call_frame_t *frame, xlator_t *this);
+
gf_boolean_t
-afr_changelog_pre_op_uninherit (call_frame_t *frame, xlator_t *this);
+afr_changelog_pre_op_uninherit(call_frame_t *frame, xlator_t *this);
gf_boolean_t
-afr_changelog_pre_op_update (call_frame_t *frame, xlator_t *this);
+afr_changelog_pre_op_update(call_frame_t *frame, xlator_t *this);
int
-afr_changelog_do (call_frame_t *frame, xlator_t *this, dict_t *xattr,
- afr_changelog_resume_t changelog_resume);
+afr_changelog_call_count(afr_transaction_type type,
+ unsigned char *pre_op_subvols,
+ unsigned char *failed_subvols,
+ unsigned int child_count);
+int
+afr_changelog_do(call_frame_t *frame, xlator_t *this, dict_t *xattr,
+ afr_changelog_resume_t changelog_resume,
+ afr_xattrop_type_t op);
-static int32_t
-afr_quorum_errno (afr_private_t *priv)
+static void
+afr_ta_decide_post_op_state(call_frame_t *frame, xlator_t *this);
+
+static int
+afr_ta_post_op_do(void *opaque);
+
+static int
+afr_ta_post_op_synctask(xlator_t *this, afr_local_t *local);
+
+static int
+afr_changelog_post_op_done(call_frame_t *frame, xlator_t *this);
+
+static void
+afr_changelog_post_op_fail(call_frame_t *frame, xlator_t *this, int op_errno);
+
+void
+afr_ta_locked_priv_invalidate(afr_private_t *priv)
+{
+ priv->ta_bad_child_index = AFR_CHILD_UNKNOWN;
+ priv->release_ta_notify_dom_lock = _gf_false;
+ priv->ta_notify_dom_lock_offset = 0;
+}
+
+static void
+afr_ta_process_waitq(xlator_t *this)
{
- if (priv->quorum_reads)
- return ENOTCONN;
- return EROFS;
+ afr_local_t *entry = NULL;
+ afr_private_t *priv = this->private;
+ struct list_head waitq = {
+ 0,
+ };
+
+ INIT_LIST_HEAD(&waitq);
+ LOCK(&priv->lock);
+ list_splice_init(&priv->ta_waitq, &waitq);
+ UNLOCK(&priv->lock);
+ list_for_each_entry(entry, &waitq, ta_waitq)
+ {
+ afr_ta_decide_post_op_state(entry->transaction.frame, this);
+ }
}
int
-__afr_txn_write_fop (call_frame_t *frame, xlator_t *this)
+afr_ta_lock_release_done(int ret, call_frame_t *ta_frame, void *opaque)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = -1;
- unsigned char *failed_subvols = NULL;
- int i = 0;
+ afr_ta_process_waitq(ta_frame->this);
+ STACK_DESTROY(ta_frame->root);
+ return 0;
+}
- local = frame->local;
- priv = this->private;
+int
+afr_release_notify_lock_for_ta(void *opaque)
+{
+ xlator_t *this = NULL;
+ afr_private_t *priv = NULL;
+ loc_t loc = {
+ 0,
+ };
+ struct gf_flock flock = {
+ 0,
+ };
+ int ret = -1;
+
+ this = (xlator_t *)opaque;
+ priv = this->private;
+ ret = afr_fill_ta_loc(this, &loc, _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to populate loc for thin-arbiter.");
+ goto out;
+ }
+ flock.l_type = F_UNLCK;
+ flock.l_start = priv->ta_notify_dom_lock_offset;
+ flock.l_len = 1;
+ ret = syncop_inodelk(priv->children[THIN_ARBITER_BRICK_INDEX],
+ AFR_TA_DOM_NOTIFY, &loc, F_SETLK, &flock, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to unlock AFR_TA_DOM_NOTIFY lock.");
+ }
+
+ LOCK(&priv->lock);
+ {
+ afr_ta_locked_priv_invalidate(priv);
+ }
+ UNLOCK(&priv->lock);
+out:
+ loc_wipe(&loc);
+ return ret;
+}
- failed_subvols = local->transaction.failed_subvols;
+void
+afr_zero_fill_stat(afr_local_t *local)
+{
+ if (!local)
+ return;
+ if (local->transaction.type == AFR_DATA_TRANSACTION ||
+ local->transaction.type == AFR_METADATA_TRANSACTION) {
+ gf_zero_fill_stat(&local->cont.inode_wfop.prebuf);
+ gf_zero_fill_stat(&local->cont.inode_wfop.postbuf);
+ } else if (local->transaction.type == AFR_ENTRY_TRANSACTION ||
+ local->transaction.type == AFR_ENTRY_RENAME_TRANSACTION) {
+ gf_zero_fill_stat(&local->cont.dir_fop.buf);
+ gf_zero_fill_stat(&local->cont.dir_fop.preparent);
+ gf_zero_fill_stat(&local->cont.dir_fop.postparent);
+ if (local->transaction.type == AFR_ENTRY_TRANSACTION)
+ return;
+ gf_zero_fill_stat(&local->cont.dir_fop.prenewparent);
+ gf_zero_fill_stat(&local->cont.dir_fop.postnewparent);
+ }
+}
+
+/* In case of errors afr needs to choose which xdata from lower xlators it needs
+ * to unwind with. The way it is done is by checking if there are
+ * any good subvols which failed. Give preference to errnos other than
+ * ENOTCONN even if the child is source */
+void
+afr_pick_error_xdata(afr_local_t *local, afr_private_t *priv, inode_t *inode1,
+ unsigned char *readable1, inode_t *inode2,
+ unsigned char *readable2)
+{
+ int s = -1; /*selection*/
+ int i = 0;
+ unsigned char *readable = NULL;
+
+ if (local->xdata_rsp) {
+ dict_unref(local->xdata_rsp);
+ local->xdata_rsp = NULL;
+ }
+
+ readable = alloca0(priv->child_count * sizeof(*readable));
+ if (inode2 && readable2) { /*rename fop*/
+ AFR_INTERSECT(readable, readable1, readable2, priv->child_count);
+ } else {
+ memcpy(readable, readable1, sizeof(*readable) * priv->child_count);
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
+
+ if (local->replies[i].op_ret >= 0)
+ continue;
+
+ if (local->replies[i].op_errno == ENOTCONN)
+ continue;
+
+ /*Order is important in the following condition*/
+ if ((s < 0) || (!readable[s] && readable[i]))
+ s = i;
+ }
+
+ if (s != -1 && local->replies[s].xdata) {
+ local->xdata_rsp = dict_ref(local->replies[s].xdata);
+ } else if (s == -1) {
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->replies[i].valid)
+ continue;
- call_count = priv->child_count - AFR_COUNT (failed_subvols,
- priv->child_count);
+ if (local->replies[i].op_ret >= 0)
+ continue;
- if (call_count == 0) {
- local->transaction.resume (frame, this);
- return 0;
+ if (!local->replies[i].xdata)
+ continue;
+ local->xdata_rsp = dict_ref(local->replies[i].xdata);
+ break;
}
+ }
+}
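
The selection loop above prefers a failed reply from a readable (good) child over one from a non-readable child, and ignores ENOTCONN replies entirely. A minimal standalone sketch of that preference order follows; the `reply` struct, `readable` array and sample values are invented for illustration and are not the afr types:
```
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-child reply; not the afr type. */
struct reply {
    bool valid;
    int op_ret;
    int op_errno;
};

/* Pick the reply whose errno should be unwound: skip successes and
 * ENOTCONN, prefer a failed reply from a readable child. */
static int
pick_error_reply(const struct reply *r, const unsigned char *readable, int n)
{
    int s = -1;

    for (int i = 0; i < n; i++) {
        if (!r[i].valid || r[i].op_ret >= 0 || r[i].op_errno == ENOTCONN)
            continue;
        if (s < 0 || (!readable[s] && readable[i]))
            s = i;
    }
    return s;
}

int
main(void)
{
    struct reply r[3] = {{true, -1, EROFS}, {true, -1, ENOTCONN}, {true, -1, EIO}};
    unsigned char readable[3] = {0, 1, 1};

    /* Child 0 failed with EROFS but is not readable; child 2 failed
     * with EIO and is readable, so child 2 wins. */
    printf("picked child %d\n", pick_error_reply(r, readable, 3));
    return 0;
}
```
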
+
+gf_boolean_t
+afr_needs_changelog_update(afr_local_t *local)
+{
+ if (local->transaction.type == AFR_DATA_TRANSACTION)
+ return _gf_true;
+ if (!local->optimistic_change_log)
+ return _gf_true;
+ return _gf_false;
+}
- local->call_count = call_count;
+gf_boolean_t
+afr_changelog_has_quorum(afr_local_t *local, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ int i = 0;
+ unsigned char *success_children = NULL;
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.pre_op[i] && !failed_subvols[i]) {
- local->transaction.wind (frame, this, i);
+ priv = this->private;
+ success_children = alloca0(priv->child_count);
- if (!--call_count)
- break;
- }
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.failed_subvols[i]) {
+ success_children[i] = 1;
}
+ }
- return 0;
+ if (afr_has_quorum(success_children, this, NULL)) {
+ return _gf_true;
+ }
+
+ return _gf_false;
}
+gf_boolean_t
+afr_is_write_subvol_valid(call_frame_t *frame, xlator_t *this)
+{
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ uint64_t write_subvol = 0;
+ unsigned char *writable = NULL;
+ uint16_t datamap = 0;
+
+ local = frame->local;
+ priv = this->private;
+ writable = alloca0(priv->child_count);
+
+ write_subvol = afr_write_subvol_get(frame, this);
+ datamap = (write_subvol & 0x00000000ffff0000) >> 16;
+ for (i = 0; i < priv->child_count; i++) {
+ if (datamap & (1 << i))
+ writable[i] = 1;
+
+ if (writable[i] && !local->transaction.failed_subvols[i])
+ return _gf_true;
+ }
+
+ return _gf_false;
+}
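
The mask above extracts a 16-bit per-child data map from bits 16-31 of the 64-bit write_subvol value. A small self-contained sketch of just that decoding step; the sample value and the replica count of 3 are made up:
```
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* Hypothetical packed value: bits 16-31 carry the data map. */
    uint64_t write_subvol = 0x0000000000050000ULL;
    uint16_t datamap = (write_subvol & 0x00000000ffff0000ULL) >> 16;
    int child_count = 3; /* made-up replica-3 volume */

    for (int i = 0; i < child_count; i++)
        printf("child %d: %s\n", i,
               (datamap & (1 << i)) ? "writable" : "not writable");
    return 0;
}
```
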
int
-__afr_txn_write_done (call_frame_t *frame, xlator_t *this)
+afr_transaction_fop(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int call_count = -1;
+ unsigned char *failed_subvols = NULL;
+ int i = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ failed_subvols = local->transaction.failed_subvols;
+ call_count = priv->child_count -
+ AFR_COUNT(failed_subvols, priv->child_count);
+ /* Fail if pre-op did not succeed on quorum no. of bricks. */
+ if (!afr_changelog_has_quorum(local, this) || !call_count) {
+ local->op_ret = -1;
+ /* local->op_errno is already captured in changelog cbk. */
+ afr_transaction_resume(frame, this);
+ return 0;
+ }
- local = frame->local;
+ /* Fail if none of the writable bricks is up. */
+ if (local->transaction.type == AFR_DATA_TRANSACTION &&
+ !afr_is_write_subvol_valid(frame, this)) {
+ local->op_ret = -1;
+ local->op_errno = EIO;
+ afr_transaction_resume(frame, this);
+ return 0;
+ }
- local->transaction.unwind (frame, this);
+ local->call_count = call_count;
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.pre_op[i] && !failed_subvols[i]) {
+ local->transaction.wind(frame, this, i);
- AFR_STACK_DESTROY (frame);
+ if (!--call_count)
+ break;
+ }
+ }
- return 0;
+ return 0;
}
-
-call_frame_t*
-afr_transaction_detach_fop_frame (call_frame_t *frame)
+int
+afr_transaction_done(call_frame_t *frame, xlator_t *this)
{
- afr_local_t * local = NULL;
- call_frame_t *fop_frame = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ gf_boolean_t unwind = _gf_false;
+ afr_lock_t *lock = NULL;
+ afr_local_t *lock_local = NULL;
- local = frame->local;
+ priv = this->private;
+ local = frame->local;
- LOCK (&frame->lock);
+ if (priv->consistent_metadata) {
+ LOCK(&frame->lock);
{
- fop_frame = local->transaction.main_frame;
- local->transaction.main_frame = NULL;
+ unwind = (local->transaction.main_frame != NULL);
}
- UNLOCK (&frame->lock);
-
- return fop_frame;
+ UNLOCK(&frame->lock);
+ if (unwind) /*It definitely did post-op*/
+ afr_zero_fill_stat(local);
+ }
+
+ if (local->transaction.do_eager_unlock) {
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ LOCK(&local->inode->lock);
+ {
+ lock->acquired = _gf_false;
+ lock->release = _gf_false;
+ list_splice_init(&lock->frozen, &lock->waiting);
+ if (list_empty(&lock->waiting))
+ goto unlock;
+ lock_local = list_entry(lock->waiting.next, afr_local_t,
+ transaction.wait_list);
+ list_del_init(&lock_local->transaction.wait_list);
+ list_add(&lock_local->transaction.owner_list, &lock->owners);
+ }
+ unlock:
+ UNLOCK(&local->inode->lock);
+ }
+ if (lock_local) {
+ afr_lock(lock_local->transaction.frame,
+ lock_local->transaction.frame->this);
+ }
+ local->transaction.unwind(frame, this);
+
+ GF_ASSERT(list_empty(&local->transaction.owner_list));
+ GF_ASSERT(list_empty(&local->transaction.wait_list));
+ AFR_STACK_DESTROY(frame);
+
+ return 0;
}
+static void
+afr_lock_fail_shared(afr_local_t *local, struct list_head *list)
+{
+ afr_local_t *each = NULL;
+
+ while (!list_empty(list)) {
+ each = list_entry(list->next, afr_local_t, transaction.wait_list);
+ list_del_init(&each->transaction.wait_list);
+ each->op_ret = -1;
+ each->op_errno = local->op_errno;
+ afr_transaction_done(each->transaction.frame,
+ each->transaction.frame->this);
+ }
+}
static void
-afr_save_lk_owner (call_frame_t *frame)
+afr_handle_lock_acquire_failure(afr_local_t *local)
{
- afr_local_t * local = NULL;
+ struct list_head shared;
+ afr_lock_t *lock = NULL;
+
+ if (!local->transaction.eager_lock_on)
+ goto out;
+
+ lock = &local->inode_ctx->lock[local->transaction.type];
- local = frame->local;
+ INIT_LIST_HEAD(&shared);
+ LOCK(&local->inode->lock);
+ {
+ lock->release = _gf_true;
+ list_splice_init(&lock->waiting, &shared);
+ }
+ UNLOCK(&local->inode->lock);
- local->saved_lk_owner = frame->root->lk_owner;
+ afr_lock_fail_shared(local, &shared);
+ local->transaction.do_eager_unlock = _gf_true;
+out:
+ local->internal_lock.lock_cbk = afr_transaction_done;
+ afr_unlock(local->transaction.frame, local->transaction.frame->this);
}
+call_frame_t *
+afr_transaction_detach_fop_frame(call_frame_t *frame)
+{
+ afr_local_t *local = NULL;
+ call_frame_t *fop_frame = NULL;
+
+ local = frame->local;
+
+ afr_handle_inconsistent_fop(frame, &local->op_ret, &local->op_errno);
+ LOCK(&frame->lock);
+ {
+ fop_frame = local->transaction.main_frame;
+ local->transaction.main_frame = NULL;
+ }
+ UNLOCK(&frame->lock);
+
+ return fop_frame;
+}
static void
-afr_restore_lk_owner (call_frame_t *frame)
+afr_save_lk_owner(call_frame_t *frame)
{
- afr_local_t * local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- frame->root->lk_owner = local->saved_lk_owner;
+ local->saved_lk_owner = frame->root->lk_owner;
}
-void
-__mark_all_success (call_frame_t *frame, xlator_t *this)
+static void
+afr_restore_lk_owner(call_frame_t *frame)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i;
+ afr_local_t *local = NULL;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
- for (i = 0; i < priv->child_count; i++) {
- local->transaction.failed_subvols[i] = 0;
- }
+ frame->root->lk_owner = local->saved_lk_owner;
}
void
-afr_compute_pre_op_sources (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- afr_transaction_type type = -1;
- dict_t *xdata = NULL;
- int **matrix = NULL;
- int idx = -1;
- int i = 0;
- int j = 0;
-
- priv = this->private;
- local = frame->local;
- type = local->transaction.type;
- idx = afr_index_for_transaction_type (type);
- matrix = ALLOC_MATRIX (priv->child_count, int);
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->transaction.pre_op_xdata[i])
- continue;
- xdata = local->transaction.pre_op_xdata[i];
- afr_selfheal_fill_matrix (this, matrix, i, idx, xdata);
- }
-
- memset (local->transaction.pre_op_sources, 1, priv->child_count);
+__mark_all_success(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i;
- /*If lock or pre-op failed on a brick, it is not a source. */
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.failed_subvols[i])
- local->transaction.pre_op_sources[i] = 0;
- }
+ local = frame->local;
+ priv = this->private;
- /* If brick is blamed by others, it is not a source. */
- for (i = 0; i < priv->child_count; i++)
- for (j = 0; j < priv->child_count; j++)
- if (matrix[i][j] != 0)
- local->transaction.pre_op_sources[j] = 0;
+ for (i = 0; i < priv->child_count; i++) {
+ local->transaction.failed_subvols[i] = 0;
+ }
+}
- /*We don't need the xattrs any more. */
- for (i = 0; i < priv->child_count; i++)
- if (local->transaction.pre_op_xdata[i]) {
- dict_unref (local->transaction.pre_op_xdata[i]);
- local->transaction.pre_op_xdata[i] = NULL;
- }
+void
+afr_compute_pre_op_sources(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_transaction_type type = -1;
+ dict_t *xdata = NULL;
+ int **matrix = NULL;
+ int idx = -1;
+ int i = 0;
+ int j = 0;
+
+ priv = this->private;
+ local = frame->local;
+ type = local->transaction.type;
+ idx = afr_index_for_transaction_type(type);
+ matrix = ALLOC_MATRIX(priv->child_count, int);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.changelog_xdata[i])
+ continue;
+ xdata = local->transaction.changelog_xdata[i];
+ afr_selfheal_fill_matrix(this, matrix, i, idx, xdata);
+ }
+
+ memset(local->transaction.pre_op_sources, 1, priv->child_count);
+
+ /*If lock or pre-op failed on a brick, it is not a source. */
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.failed_subvols[i])
+ local->transaction.pre_op_sources[i] = 0;
+ }
+
+ /* If brick is blamed by others, it is not a source. */
+ for (i = 0; i < priv->child_count; i++)
+ for (j = 0; j < priv->child_count; j++)
+ if (matrix[i][j] != 0)
+ local->transaction.pre_op_sources[j] = 0;
}
void
-afr_txn_arbitrate_fop_cbk (call_frame_t *frame, xlator_t *this)
+afr_txn_arbitrate_fop(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- gf_boolean_t fop_failed = _gf_false;
- unsigned char *pre_op_sources = NULL;
- int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int pre_op_sources_count = 0;
+ int i = 0;
- local = frame->local;
- priv = this->private;
- pre_op_sources = local->transaction.pre_op_sources;
+ priv = this->private;
+ local = frame->local;
- if (priv->arbiter_count != 1 || local->op_ret < 0)
- return;
+ afr_compute_pre_op_sources(frame, this);
+ pre_op_sources_count = AFR_COUNT(local->transaction.pre_op_sources,
+ priv->child_count);
- /* If the fop failed on the brick, it is not a source. */
+ /* If arbiter is the only source, do not proceed. */
+ if (pre_op_sources_count < 2 &&
+ local->transaction.pre_op_sources[ARBITER_BRICK_INDEX]) {
+ local->op_ret = -1;
+ local->op_errno = ENOTCONN;
for (i = 0; i < priv->child_count; i++)
- if (local->transaction.failed_subvols[i])
- pre_op_sources[i] = 0;
-
- switch (AFR_COUNT (pre_op_sources, priv->child_count)) {
- case 1:
- if (pre_op_sources[ARBITER_BRICK_INDEX])
- fop_failed = _gf_true;
- break;
- case 0:
- fop_failed = _gf_true;
- break;
- }
+ local->transaction.failed_subvols[i] = 1;
+ }
- if (fop_failed) {
- local->op_ret = -1;
- local->op_errno = ENOTCONN;
- }
+ afr_transaction_fop(frame, this);
- return;
+ return;
}
-void
-afr_txn_arbitrate_fop (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int pre_op_sources_count = 0;
-
- priv = this->private;
- local = frame->local;
-
- afr_compute_pre_op_sources (frame, this);
- pre_op_sources_count = AFR_COUNT (local->transaction.pre_op_sources,
- priv->child_count);
-
- /* If arbiter is the only source, do not proceed. */
- if (pre_op_sources_count < 2 &&
- local->transaction.pre_op_sources[ARBITER_BRICK_INDEX]) {
- local->internal_lock.lock_cbk = local->transaction.done;
- local->op_ret = -1;
- local->op_errno = ENOTCONN;
- afr_restore_lk_owner (frame);
- afr_unlock (frame, this);
- } else {
- local->transaction.fop (frame, this);
+int
+afr_transaction_perform_fop(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ int ret = 0;
+ int failure_count = 0;
+ struct list_head shared;
+ afr_lock_t *lock = NULL;
+
+ local = frame->local;
+ priv = this->private;
+
+ INIT_LIST_HEAD(&shared);
+ if (local->transaction.type == AFR_DATA_TRANSACTION &&
+ !local->transaction.inherited) {
+ ret = afr_write_subvol_set(frame, this);
+ if (ret) {
+ /*act as if operation failed on all subvols*/
+ local->op_ret = -1;
+ local->op_errno = -ret;
+ for (i = 0; i < priv->child_count; i++)
+ local->transaction.failed_subvols[i] = 1;
}
-
- return;
+ }
+
+ if (local->pre_op_compat)
+ /* old mode, pre-op was done as afr_changelog_do()
+ just now, before OP */
+ afr_changelog_pre_op_update(frame, this);
+
+ if (!local->transaction.eager_lock_on || local->transaction.inherited)
+ goto fop;
+ failure_count = AFR_COUNT(local->transaction.failed_subvols,
+ priv->child_count);
+ if (failure_count == priv->child_count) {
+ afr_handle_lock_acquire_failure(local);
+ return 0;
+ } else {
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ LOCK(&local->inode->lock);
+ {
+ lock->acquired = _gf_true;
+ __afr_transaction_wake_shared(local, &shared);
+ }
+ UNLOCK(&local->inode->lock);
+ }
+
+fop:
+ /* Perform fops with the lk-owner from the top xlator.
+ * E.g., the lk-owner of posix-lk and flush should be the same;
+ * flush can't clear the posix-lks without that lk-owner.
+ */
+ afr_save_lk_owner(frame);
+ frame->root->lk_owner = local->transaction.main_frame->root->lk_owner;
+
+ if (priv->arbiter_count == 1) {
+ afr_txn_arbitrate_fop(frame, this);
+ } else {
+ afr_transaction_fop(frame, this);
+ }
+
+ afr_lock_resume_shared(&shared);
+ return 0;
}
int
-afr_transaction_perform_fop (call_frame_t *frame, xlator_t *this)
+afr_set_pending_dict(afr_private_t *priv, dict_t *xattr, int **pending)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- fd_t *fd = NULL;
+ int i = 0;
+ int ret = 0;
- local = frame->local;
- priv = this->private;
- fd = local->fd;
+ for (i = 0; i < priv->child_count; i++) {
+ ret = dict_set_static_bin(xattr, priv->pending_key[i], pending[i],
+ AFR_NUM_CHANGE_LOGS * sizeof(int));
+ /* 3 = data+metadata+entry */
- /* Perform fops with the lk-owner from top xlator.
- * Eg: lk-owner of posix-lk and flush should be same,
- * flush cant clear the posix-lks without that lk-owner.
- */
- afr_save_lk_owner (frame);
- frame->root->lk_owner =
- local->transaction.main_frame->root->lk_owner;
-
- if (local->pre_op_compat)
- /* old mode, pre-op was done as afr_changelog_do()
- just now, before OP */
- afr_changelog_pre_op_update (frame, this);
-
- /* The wake up needs to happen independent of
- what type of fop arrives here. If it was
- a write, then it has already inherited the
- lock and changelog. If it was not a write,
- then the presumption of the optimization (of
- optimizing for successive write operations)
- fails.
- */
- if (fd)
- afr_delayed_changelog_wake_up (this, fd);
- if (priv->arbiter_count == 1) {
- afr_txn_arbitrate_fop (frame, this);
- } else {
- local->transaction.fop (frame, this);
- }
+ if (ret)
+ break;
+ }
- return 0;
+ return ret;
}
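
afr_set_pending_dict() stores, per child, an array of AFR_NUM_CHANGE_LOGS 32-bit counters in network byte order under that child's pending key. A hedged standalone sketch of that payload layout, assuming the data/metadata/entry slot order implied by the "3 = data+metadata+entry" comment; the child count, the slot chosen and the values are illustrative only:
```
#include <arpa/inet.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_CHANGE_LOGS 3 /* data + metadata + entry */

int
main(void)
{
    /* One row per child: this is the value that would go under that
     * child's pending-key xattr (hypothetical 2-brick replica). */
    uint32_t pending[2][NUM_CHANGE_LOGS] = {{0}};

    /* Pretend the data fop failed on child 1: bump the data slot of
     * child 1's row, stored in network byte order. */
    pending[1][0] = htonl(1);

    for (int i = 0; i < 2; i++)
        printf("child %d payload: %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
               i, pending[i][0], pending[i][1], pending[i][2]);
    return 0;
}
```
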
-static int
-__changelog_enabled (afr_private_t *priv, afr_transaction_type type)
+static void
+afr_ta_dom_lock_check_and_release(afr_ta_fop_state_t fop_state, xlator_t *this)
{
- int ret = 0;
-
- switch (type) {
- case AFR_DATA_TRANSACTION:
- if (priv->data_change_log)
- ret = 1;
-
+ afr_private_t *priv = this->private;
+ unsigned int inmem_count = 0;
+ unsigned int onwire_count = 0;
+ gf_boolean_t release = _gf_false;
+
+ LOCK(&priv->lock);
+ {
+ /* Once we get the notify lock release upcall notification,
+ we will not release the lock if any of the fop state
+ counters are non-zero.
+ */
+ onwire_count = priv->ta_on_wire_txn_count;
+ inmem_count = priv->ta_in_mem_txn_count;
+ switch (fop_state) {
+ case TA_GET_INFO_FROM_TA_FILE:
+ onwire_count = --priv->ta_on_wire_txn_count;
break;
-
- case AFR_METADATA_TRANSACTION:
- if (priv->metadata_change_log)
- ret = 1;
-
+ case TA_INFO_IN_MEMORY_SUCCESS:
+ case TA_INFO_IN_MEMORY_FAILED:
+ inmem_count = --priv->ta_in_mem_txn_count;
break;
-
- case AFR_ENTRY_TRANSACTION:
- case AFR_ENTRY_RENAME_TRANSACTION:
- if (priv->entry_change_log)
- ret = 1;
-
+ case TA_WAIT_FOR_NOTIFY_LOCK_REL:
+ GF_ASSERT(0);
+ break;
+ case TA_SUCCESS:
break;
}
+ release = priv->release_ta_notify_dom_lock;
+ }
+ UNLOCK(&priv->lock);
- return ret;
-}
+ if (inmem_count != 0 || release == _gf_false || onwire_count != 0)
+ return;
+ afr_ta_lock_release_synctask(this);
+}
-static int
-__fop_changelog_needed (call_frame_t *frame, xlator_t *this)
+static void
+afr_ta_process_onwireq(afr_ta_fop_state_t fop_state, xlator_t *this)
{
- afr_private_t * priv = NULL;
- afr_local_t * local = NULL;
- int op_ret = 0;
- afr_transaction_type type = -1;
-
- priv = this->private;
- local = frame->local;
- type = local->transaction.type;
-
- if (__changelog_enabled (priv, type)) {
- switch (local->op) {
-
- case GF_FOP_WRITE:
- case GF_FOP_FTRUNCATE:
- op_ret = 1;
- break;
-
- case GF_FOP_FLUSH:
- op_ret = 0;
- break;
-
- default:
- op_ret = 1;
- }
+ afr_private_t *priv = this->private;
+ afr_local_t *entry = NULL;
+ int bad_child = AFR_CHILD_UNKNOWN;
+
+ struct list_head onwireq = {
+ 0,
+ };
+ INIT_LIST_HEAD(&onwireq);
+
+ LOCK(&priv->lock);
+ {
+ bad_child = priv->ta_bad_child_index;
+ if (bad_child == AFR_CHILD_UNKNOWN) {
+ /*The previous on-wire ta_post_op was a failure. Just dequeue
+ *one element to wind on-wire again. */
+ entry = list_entry(priv->ta_onwireq.next, afr_local_t, ta_onwireq);
+ list_del_init(&entry->ta_onwireq);
+ } else {
+ /* Prepare to process all fops based on bad_child_index. */
+ list_splice_init(&priv->ta_onwireq, &onwireq);
}
+ }
+ UNLOCK(&priv->lock);
- return op_ret;
+ if (entry) {
+ afr_ta_post_op_synctask(this, entry);
+ return;
+ } else {
+ while (!list_empty(&onwireq)) {
+ entry = list_entry(onwireq.next, afr_local_t, ta_onwireq);
+ list_del_init(&entry->ta_onwireq);
+ if (entry->ta_failed_subvol == bad_child) {
+ afr_post_op_handle_success(entry->transaction.frame, this);
+ } else {
+ afr_post_op_handle_failure(entry->transaction.frame, this, EIO);
+ }
+ }
+ }
}
-
int
-afr_set_pending_dict (afr_private_t *priv, dict_t *xattr, int **pending)
+afr_changelog_post_op_done(call_frame_t *frame, xlator_t *this)
{
- int i = 0;
- int ret = 0;
+ afr_local_t *local = NULL;
+ afr_internal_lock_t *int_lock = NULL;
+ afr_private_t *priv = NULL;
- for (i = 0; i < priv->child_count; i++) {
+ local = frame->local;
+ priv = this->private;
+ int_lock = &local->internal_lock;
- ret = dict_set_static_bin (xattr, priv->pending_key[i],
- pending[i],
- AFR_NUM_CHANGE_LOGS * sizeof (int));
- /* 3 = data+metadata+entry */
+ if (priv->thin_arbiter_count) {
+ /*fop should not come here with TA_WAIT_FOR_NOTIFY_LOCK_REL state */
+ afr_ta_dom_lock_check_and_release(local->fop_state, this);
+ }
- if (ret)
- break;
- }
+ /* Fail the FOP if post-op did not succeed on quorum no. of bricks. */
+ if (!afr_changelog_has_quorum(local, this)) {
+ local->op_ret = -1;
+ /*local->op_errno is already captured in changelog cbk*/
+ }
- return ret;
-}
+ if (local->transaction.resume_stub) {
+ call_resume(local->transaction.resume_stub);
+ local->transaction.resume_stub = NULL;
+ }
-int
-afr_lock_server_count (afr_private_t *priv, afr_transaction_type type)
-{
- int ret = 0;
+ int_lock->lock_cbk = afr_transaction_done;
+ afr_unlock(frame, this);
- switch (type) {
- case AFR_DATA_TRANSACTION:
- ret = priv->child_count;
- break;
+ return 0;
+}
- case AFR_METADATA_TRANSACTION:
- ret = priv->child_count;
- break;
+static void
+afr_changelog_post_op_fail(call_frame_t *frame, xlator_t *this, int op_errno)
+{
+ afr_local_t *local = frame->local;
+ local->op_ret = -1;
+ local->op_errno = op_errno;
- case AFR_ENTRY_TRANSACTION:
- case AFR_ENTRY_RENAME_TRANSACTION:
- ret = priv->child_count;
- break;
- }
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, AFR_MSG_THIN_ARB,
+ "Failing %s for gfid %s. Fop state is:%d", gf_fop_list[local->op],
+ uuid_utoa(local->inode->gfid), local->fop_state);
- return ret;
+ afr_changelog_post_op_done(frame, this);
}
-/* {{{ pending */
-
+unsigned char *
+afr_locked_nodes_get(afr_transaction_type type, afr_internal_lock_t *int_lock)
+{
+ /* Because the same set of subvols participates in all
+ * lockee entities. */
+ return int_lock->lockee[0].locked_nodes;
+}
int
-afr_changelog_post_op_done (call_frame_t *frame, xlator_t *this)
+afr_changelog_call_count(afr_transaction_type type,
+ unsigned char *pre_op_subvols,
+ unsigned char *failed_subvols,
+ unsigned int child_count)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- afr_internal_lock_t *int_lock = NULL;
+ int i = 0;
+ int call_count = 0;
- local = frame->local;
- priv = this->private;
- int_lock = &local->internal_lock;
-
- if (local->transaction.resume_stub) {
- call_resume (local->transaction.resume_stub);
- local->transaction.resume_stub = NULL;
- }
+ for (i = 0; i < child_count; i++) {
+ if (pre_op_subvols[i] && !failed_subvols[i]) {
+ call_count++;
+ }
+ }
- if (afr_lock_server_count (priv, local->transaction.type) == 0) {
- local->transaction.done (frame, this);
- } else {
- int_lock->lock_cbk = local->transaction.done;
- afr_unlock (frame, this);
- }
+ if (type == AFR_ENTRY_RENAME_TRANSACTION)
+ call_count *= 2;
- return 0;
+ return call_count;
}
-
-afr_inodelk_t*
-afr_get_inodelk (afr_internal_lock_t *int_lock, char *dom)
+gf_boolean_t
+afr_txn_nothing_failed(call_frame_t *frame, xlator_t *this)
{
- afr_inodelk_t *inodelk = NULL;
- int i = 0;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ if (priv->thin_arbiter_count) {
+ /* We need to perform post-op even if 1 data brick was down
+ * before the txn started.*/
+ if (AFR_COUNT(local->transaction.failed_subvols, priv->child_count))
+ return _gf_false;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.pre_op[i] &&
+ local->transaction.failed_subvols[i])
+ return _gf_false;
+ }
+
+ return _gf_true;
+}
- for (i = 0; int_lock->inodelk[i].domain; i++) {
- inodelk = &int_lock->inodelk[i];
- if (strcmp (dom, inodelk->domain) == 0)
- return inodelk;
- }
- return NULL;
+void
+afr_handle_symmetric_errors(call_frame_t *frame, xlator_t *this)
+{
+ if (afr_is_symmetric_error(frame, this))
+ __mark_all_success(frame, this);
}
-unsigned char*
-afr_locked_nodes_get (afr_transaction_type type, afr_internal_lock_t *int_lock)
+gf_boolean_t
+afr_has_quorum(unsigned char *subvols, xlator_t *this, call_frame_t *frame)
{
- unsigned char *locked_nodes = NULL;
- afr_inodelk_t *inodelk = NULL;
- switch (type) {
- case AFR_DATA_TRANSACTION:
- case AFR_METADATA_TRANSACTION:
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
- locked_nodes = inodelk->locked_nodes;
- break;
+ unsigned int quorum_count = 0;
+ afr_private_t *priv = NULL;
+ unsigned int up_children_count = 0;
- case AFR_ENTRY_TRANSACTION:
- case AFR_ENTRY_RENAME_TRANSACTION:
- /*Because same set of subvols participate in all lockee
- * entities*/
- locked_nodes = int_lock->lockee[0].locked_nodes;
- break;
+ priv = this->private;
+ up_children_count = AFR_COUNT(subvols, priv->child_count);
+
+ if (afr_lookup_has_quorum(frame, up_children_count))
+ return _gf_true;
+
+ if (priv->quorum_count == AFR_QUORUM_AUTO) {
+ /*
+ * Special case for auto-quorum with an even number of nodes.
+ *
+ * A replica set with even count N can only handle the same
+ * number of failures as odd N-1 before losing "vanilla"
+ * quorum, and the probability of more simultaneous failures is
+ * actually higher. For example, with a 1% chance of failure
+ * we'd have a 0.03% chance of two simultaneous failures with
+ * N=3 but a 0.06% chance with N=4. However, the special case
+ * is necessary for N=2 because there's no real quorum in that
+ * case (i.e. can't normally survive *any* failures). In that
+ * case, we treat the first node as a tie-breaker, allowing
+ * quorum to be retained in some cases while still honoring the
+ * all-important constraint that there can not simultaneously
+ * be two partitioned sets of nodes each believing they have
+ * quorum. Of two equally sized sets, the one without that
+ * first node will lose.
+ *
+ * It turns out that the special case is beneficial for higher
+ * values of N as well. Continuing the example above, the
+ * probability of losing quorum with N=4 and this type of
+ * quorum is (very) slightly lower than with N=3 and vanilla
+ * quorum. The difference becomes even more pronounced with
+ * higher N. Therefore, even though such replica counts are
+ * unlikely to be seen in practice, we might as well use the
+ * "special" quorum then as well.
+ */
+ if ((up_children_count * 2) == priv->child_count) {
+ return subvols[0];
}
- return locked_nodes;
-}
+ }
+ if (priv->quorum_count == AFR_QUORUM_AUTO) {
+ quorum_count = priv->child_count / 2 + 1;
+ } else {
+ quorum_count = priv->quorum_count;
+ }
-int
-afr_changelog_call_count (afr_transaction_type type,
- unsigned char *pre_op_subvols,
- unsigned int child_count)
-{
- int call_count = 0;
+ if (up_children_count >= quorum_count)
+ return _gf_true;
- call_count = AFR_COUNT(pre_op_subvols, child_count);
+ return _gf_false;
+}
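
The special case above boils down to: under auto-quorum with an even child count, exactly half the children count as quorum only when the first ("senior-most") child is among them; otherwise a plain majority is required. A minimal standalone restatement of that rule; the function and variable names are local to this sketch, not the afr API:
```
#include <stdbool.h>
#include <stdio.h>

/* Quorum rule sketched from the comment above: with an even child
 * count, exactly half the children have quorum only if the first
 * ("senior-most") child is among them; otherwise majority wins. */
static bool
has_auto_quorum(const unsigned char *up, int child_count)
{
    int up_count = 0;

    for (int i = 0; i < child_count; i++)
        up_count += up[i] ? 1 : 0;

    if ((up_count * 2) == child_count)
        return up[0]; /* tie-breaker */

    return up_count >= (child_count / 2 + 1);
}

int
main(void)
{
    unsigned char a[] = {1, 0}; /* replica 2: only the first child up */
    unsigned char b[] = {0, 1}; /* replica 2: only the second child up */

    printf("{1,0}: %d  {0,1}: %d\n", has_auto_quorum(a, 2), has_auto_quorum(b, 2));
    return 0;
}
```
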
- if (type == AFR_ENTRY_RENAME_TRANSACTION)
- call_count *= 2;
+static gf_boolean_t
+afr_has_fop_quorum(call_frame_t *frame)
+{
+ xlator_t *this = frame->this;
+ afr_local_t *local = frame->local;
+ unsigned char *locked_nodes = NULL;
- return call_count;
+ locked_nodes = afr_locked_nodes_get(local->transaction.type,
+ &local->internal_lock);
+ return afr_has_quorum(locked_nodes, this, NULL);
}
+static gf_boolean_t
+afr_has_fop_cbk_quorum(call_frame_t *frame)
+{
+ afr_local_t *local = frame->local;
+ xlator_t *this = frame->this;
+ afr_private_t *priv = this->private;
+ unsigned char *success = alloca0(priv->child_count);
+ int i = 0;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.pre_op[i])
+ if (!local->transaction.failed_subvols[i])
+ success[i] = 1;
+ }
+
+ return afr_has_quorum(success, this, NULL);
+}
gf_boolean_t
-afr_txn_nothing_failed (call_frame_t *frame, xlator_t *this)
+afr_need_dirty_marking(call_frame_t *frame, xlator_t *this)
{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
- int i = 0;
+ afr_private_t *priv = this->private;
+ afr_local_t *local = NULL;
+ gf_boolean_t need_dirty = _gf_false;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.pre_op[i] &&
- local->transaction.failed_subvols[i])
- return _gf_false;
- }
+ if (!priv->quorum_count || !local->optimistic_change_log)
+ return _gf_false;
- return _gf_true;
-}
+ if (local->transaction.type == AFR_DATA_TRANSACTION ||
+ local->transaction.type == AFR_METADATA_TRANSACTION)
+ return _gf_false;
+ if (AFR_COUNT(local->transaction.failed_subvols, priv->child_count) ==
+ priv->child_count)
+ return _gf_false;
-void
-afr_handle_symmetric_errors (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int op_errno = 0;
- int i_errno = 0;
- gf_boolean_t matching_errors = _gf_true;
- int i = 0;
-
- priv = this->private;
- local = frame->local;
-
- for (i = 0; i < priv->child_count; i++) {
- if (!local->replies[i].valid)
- continue;
- if (local->replies[i].op_ret != -1) {
- /* Operation succeeded on at least on subvol,
- so it is not a failed-everywhere situation.
- */
- matching_errors = _gf_false;
- break;
- }
- i_errno = local->replies[i].op_errno;
-
- if (i_errno == ENOTCONN) {
- /* ENOTCONN is not a symmetric error. We do not
- know if the operation was performed on the
- backend or not.
- */
- matching_errors = _gf_false;
- break;
- }
-
- if (!op_errno) {
- op_errno = i_errno;
- } else if (op_errno != i_errno) {
- /* Mismatching op_errno's */
- matching_errors = _gf_false;
- break;
- }
- }
-
- if (matching_errors)
- __mark_all_success (frame, this);
+ if (!afr_has_fop_cbk_quorum(frame))
+ need_dirty = _gf_true;
+
+ return need_dirty;
}
-gf_boolean_t
-afr_has_quorum (unsigned char *subvols, xlator_t *this)
+void
+afr_handle_quorum(call_frame_t *frame, xlator_t *this)
{
- unsigned int quorum_count = 0;
- afr_private_t *priv = NULL;
- unsigned int up_children_count = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ const char *file = NULL;
+ uuid_t gfid = {0};
- priv = this->private;
- up_children_count = AFR_COUNT (subvols, priv->child_count);
+ local = frame->local;
+ priv = frame->this->private;
- if (priv->quorum_count == AFR_QUORUM_AUTO) {
- /*
- * Special case for even numbers of nodes in auto-quorum:
- * if we have exactly half children up
- * and that includes the first ("senior-most") node, then that counts
- * as quorum even if it wouldn't otherwise. This supports e.g. N=2
- * while preserving the critical property that there can only be one
- * such group.
- */
- if ((priv->child_count % 2 == 0) &&
- (up_children_count == (priv->child_count/2)))
- return subvols[0];
- }
+ if (priv->quorum_count == 0)
+ return;
- if (priv->quorum_count == AFR_QUORUM_AUTO) {
- quorum_count = priv->child_count/2 + 1;
- } else {
- quorum_count = priv->quorum_count;
- }
+ /* If the fop already failed return right away to preserve errno */
+ if (local->op_ret == -1)
+ return;
- if (up_children_count >= quorum_count)
- return _gf_true;
+ /*
+ * A network split may happen just after the fops are unwound, so check
+ * whether the fop succeeded in a way that still satisfies quorum. If it
+ * doesn't, mark the fop as a failure and mark the changelogs so they
+ * reflect that failure.
+ *
+ * Scenario:
+ * There are 3 mounts on 3 machines (node1, node2, node3), all writing to
+ * a single file. A network split happens such that node1 can't see
+ * node2 and node3, and node2 and node3 can't see node1. All the bricks
+ * are up when the write is sent; the split happens just after the write
+ * fop is wound on node1. Node1 thinks the write failed on node2 and
+ * node3, so it marks pending changelogs for those two in its extended
+ * attributes. Node2 and node3 think the write failed on node1, so they
+ * mark pending changelogs for node1. Once the network is stable again,
+ * the file is already in split-brain. These checks prevent marking
+ * pending changelogs on other subvolumes if the fop does not succeed in
+ * a way that still satisfies quorum. With this fix, node1 ends up with
+ * all-pending changelogs (FOOL) because the write succeeded only on
+ * node1 but failed on node2 and node3; instead of marking pending
+ * changelogs on node2 and node3, it simply treats the fop as a failure
+ * and goes into the DIRTY state. Node2 and node3, on the other hand,
+ * say they are sources and have pending changelogs against node1, so
+ * there is no split-brain with the fix. The problem is eliminated
+ * completely.
+ */
+
+ if (afr_has_fop_cbk_quorum(frame))
+ return;
- return _gf_false;
+ if (afr_need_dirty_marking(frame, this))
+ goto set_response;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.pre_op[i])
+ afr_transaction_fop_failed(frame, frame->this, i);
+ }
+
+set_response:
+ local->op_ret = -1;
+ local->op_errno = afr_final_errno(local, priv);
+ if (local->op_errno == 0)
+ local->op_errno = afr_quorum_errno(priv);
+
+ if (local->fd) {
+ gf_uuid_copy(gfid, local->fd->inode->gfid);
+ file = uuid_utoa(gfid);
+ } else {
+ loc_path(&local->loc, local->loc.name);
+ file = local->loc.path;
+ }
+
+ gf_msg(frame->this->name, GF_LOG_WARNING, local->op_errno,
+ AFR_MSG_QUORUM_FAIL, "%s: Failing %s as quorum is not met", file,
+ gf_fop_list[local->op]);
+
+ switch (local->transaction.type) {
+ case AFR_ENTRY_TRANSACTION:
+ case AFR_ENTRY_RENAME_TRANSACTION:
+ afr_pick_error_xdata(local, priv, local->parent, local->readable,
+ local->parent2, local->readable2);
+ break;
+ default:
+ afr_pick_error_xdata(local, priv, local->inode, local->readable,
+ NULL, NULL);
+ break;
+ }
}
-static gf_boolean_t
-afr_has_fop_quorum (call_frame_t *frame)
+int
+afr_fill_ta_loc(xlator_t *this, loc_t *loc, gf_boolean_t is_gfid_based_fop)
{
- xlator_t *this = frame->this;
- afr_local_t *local = frame->local;
- unsigned char *locked_nodes = NULL;
-
- locked_nodes = afr_locked_nodes_get (local->transaction.type,
- &local->internal_lock);
- return afr_has_quorum (locked_nodes, this);
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+ loc->parent = inode_ref(priv->root_inode);
+ gf_uuid_copy(loc->pargfid, loc->parent->gfid);
+ loc->name = priv->pending_key[THIN_ARBITER_BRICK_INDEX];
+ if (is_gfid_based_fop && gf_uuid_is_null(priv->ta_gfid)) {
+ /* Except for afr_ta_id_file_check(), which is path-based, all other
+ * gluster FOPs need the gfid. */
+ return -EINVAL;
+ }
+ gf_uuid_copy(loc->gfid, priv->ta_gfid);
+ loc->inode = inode_new(loc->parent->table);
+ if (!loc->inode) {
+ loc_wipe(loc);
+ return -ENOMEM;
+ }
+ return 0;
}
-static gf_boolean_t
-afr_has_fop_cbk_quorum (call_frame_t *frame)
+static int
+afr_ta_post_op_done(int ret, call_frame_t *frame, void *opaque)
{
- afr_local_t *local = frame->local;
- xlator_t *this = frame->this;
- afr_private_t *priv = this->private;
- unsigned char *success = alloca0(priv->child_count);
- int i = 0;
+ xlator_t *this = NULL;
+ afr_local_t *local = NULL;
+ call_frame_t *txn_frame = NULL;
+ afr_ta_fop_state_t fop_state;
+
+ local = (afr_local_t *)opaque;
+ fop_state = local->fop_state;
+ txn_frame = local->transaction.frame;
+ this = frame->this;
+
+ if (ret == 0) {
+ /*Mark pending xattrs on the up data brick.*/
+ afr_post_op_handle_success(txn_frame, this);
+ } else {
+ afr_post_op_handle_failure(txn_frame, this, -ret);
+ }
+
+ STACK_DESTROY(frame->root);
+ afr_ta_process_onwireq(fop_state, this);
+
+ return 0;
+}
+int **
+afr_set_changelog_xattr(afr_private_t *priv, unsigned char *pending,
+ dict_t *xattr, afr_local_t *local)
+{
+ int **changelog = NULL;
+ int idx = 0;
+ int ret = 0;
+ int i;
+
+ if (local->is_new_entry == _gf_true) {
+ changelog = afr_mark_pending_changelog(priv, pending, xattr,
+ local->cont.dir_fop.buf.ia_type);
+ } else {
+ idx = afr_index_for_transaction_type(local->transaction.type);
+ changelog = afr_matrix_create(priv->child_count, AFR_NUM_CHANGE_LOGS);
+ if (!changelog) {
+ goto out;
+ }
for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.pre_op[i])
- if (!local->transaction.failed_subvols[i])
- success[i] = 1;
+ if (local->transaction.failed_subvols[i])
+ changelog[i][idx] = hton32(1);
+ }
+ ret = afr_set_pending_dict(priv, xattr, changelog);
+ if (ret < 0) {
+ afr_matrix_cleanup(changelog, priv->child_count);
+ return NULL;
}
+ }
- return afr_has_quorum (success, this);
+out:
+ return changelog;
}
-void
-afr_handle_quorum (call_frame_t *frame)
+static void
+afr_ta_locked_xattrop_validate(afr_private_t *priv, afr_local_t *local,
+ gf_boolean_t *valid)
+{
+ if (priv->ta_event_gen > local->ta_event_gen) {
+ /* We can't trust the ta's response anymore.*/
+ afr_ta_locked_priv_invalidate(priv);
+ *valid = _gf_false;
+ return;
+ }
+ return;
+}
+
+static int
+afr_ta_post_op_do(void *opaque)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- const char *file = NULL;
- uuid_t gfid = {0};
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ dict_t *xattr = NULL;
+ unsigned char *pending = NULL;
+ int **changelog = NULL;
+ int failed_subvol = -1;
+ int success_subvol = -1;
+ loc_t loc = {
+ 0,
+ };
+ int i = 0;
+ int ret = 0;
+ gf_boolean_t valid = _gf_true;
+
+ local = (afr_local_t *)opaque;
+ this = local->transaction.frame->this;
+ priv = this->private;
+
+ ret = afr_fill_ta_loc(this, &loc, _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Failed to populate loc for thin-arbiter.");
+ goto out;
+ }
+
+ xattr = dict_new();
+ if (!xattr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pending = alloca0(priv->child_count);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.failed_subvols[i]) {
+ pending[i] = 1;
+ failed_subvol = i;
+ } else {
+ success_subvol = i;
+ }
+ }
+
+ changelog = afr_set_changelog_xattr(priv, pending, xattr, local);
+
+ if (!changelog) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = afr_ta_post_op_lock(this, &loc);
+ if (ret)
+ goto out;
+
+ ret = syncop_xattrop(priv->children[THIN_ARBITER_BRICK_INDEX], &loc,
+ GF_XATTROP_ADD_ARRAY, xattr, NULL, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, AFR_MSG_THIN_ARB,
+ "Post-op on thin-arbiter id file %s failed for gfid %s.",
+ priv->pending_key[THIN_ARBITER_BRICK_INDEX],
+ uuid_utoa(local->inode->gfid));
+ }
+ LOCK(&priv->lock);
+ {
+ if (ret == 0) {
+ priv->ta_bad_child_index = failed_subvol;
+ } else if (ret == -EINVAL) {
+ priv->ta_bad_child_index = success_subvol;
+ ret = -EIO; /* TA failed the fop. Return EIO to application. */
+ }
- local = frame->local;
- priv = frame->this->private;
+ afr_ta_locked_xattrop_validate(priv, local, &valid);
+ }
+ UNLOCK(&priv->lock);
+ if (valid == _gf_false) {
+ gf_msg(this->name, GF_LOG_ERROR, EIO, AFR_MSG_THIN_ARB,
+ "Post-op on thin-arbiter id file %s for gfid %s invalidated due "
+ "to event-gen mismatch.",
+ priv->pending_key[THIN_ARBITER_BRICK_INDEX],
+ uuid_utoa(local->inode->gfid));
+ ret = -EIO;
+ }
+
+ afr_ta_post_op_unlock(this, &loc);
+out:
+ if (xattr)
+ dict_unref(xattr);
- if (priv->quorum_count == 0)
- return;
+ if (changelog)
+ afr_matrix_cleanup(changelog, priv->child_count);
- /* If the fop already failed return right away to preserve errno */
- if (local->op_ret == -1)
- return;
+ loc_wipe(&loc);
- /*
- * Network split may happen just after the fops are unwound, so check
- * if the fop succeeded in a way it still follows quorum. If it doesn't,
- * mark the fop as failure, mark the changelogs so it reflects that
- * failure.
- *
- * Scenario:
- * There are 3 mounts on 3 machines(node1, node2, node3) all writing to
- * single file. Network split happened in a way that node1 can't see
- * node2, node3. Node2, node3 both of them can't see node1. Now at the
- * time of sending write all the bricks are up. Just after write fop is
- * wound on node1, network split happens. Node1 thinks write fop failed
- * on node2, node3 so marks pending changelog for those 2 extended
- * attributes on node1. Node2, node3 thinks writes failed on node1 so
- * they mark pending changelog for node1. When the network is stable
- * again the file already is in split-brain. These checks prevent
- * marking pending changelog on other subvolumes if the fop doesn't
- * succeed in a way it is still following quorum. So with this fix what
- * is happening is, node1 will have all pending changelog(FOOL) because
- * the write succeeded only on node1 but failed on node2, node3 so
- * instead of marking pending changelogs on node2, node3 it just treats
- * the fop as failure and goes into DIRTY state. Where as node2, node3
- * say they are sources and have pending changelog to node1 so there is
- * no split-brain with the fix. The problem is eliminated completely.
- */
+ return ret;
+}
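
The LOCK block above folds the thin-arbiter xattrop result into the cached bad-child index: success records the brick the fop failed on as bad, while -EINVAL means the arbiter already blames the brick the fop succeeded on, so the fop is failed with EIO. A standalone sketch of just that mapping; the struct and function names are local to the sketch:
```
#include <errno.h>
#include <stdio.h>

struct ta_outcome {
    int bad_child; /* cached in-memory bad brick, -1 == unknown */
    int ret;       /* what the post-op reports back */
};

/* Fold the thin-arbiter xattrop result into the cached state, mirroring
 * the LOCK block above. */
static struct ta_outcome
fold_ta_xattrop(int ret, int failed_subvol, int success_subvol)
{
    struct ta_outcome out = {-1, ret};

    if (ret == 0) {
        out.bad_child = failed_subvol; /* arbiter accepted our blame */
    } else if (ret == -EINVAL) {
        out.bad_child = success_subvol; /* arbiter blames the other brick */
        out.ret = -EIO;                 /* so fail the fop with EIO */
    }
    return out;
}

int
main(void)
{
    struct ta_outcome a = fold_ta_xattrop(0, 1, 0);
    struct ta_outcome b = fold_ta_xattrop(-EINVAL, 1, 0);

    printf("success: bad=%d ret=%d, EINVAL: bad=%d ret=%d\n", a.bad_child,
           a.ret, b.bad_child, b.ret);
    return 0;
}
```
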
- if (afr_has_fop_cbk_quorum (frame))
- return;
+static int
+afr_ta_post_op_synctask(xlator_t *this, afr_local_t *local)
+{
+ call_frame_t *ta_frame = NULL;
+ int ret = 0;
+
+ ta_frame = afr_ta_frame_create(this);
+ if (!ta_frame) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to create ta_frame");
+ goto err;
+ }
+ ret = synctask_new(this->ctx->env, afr_ta_post_op_do, afr_ta_post_op_done,
+ ta_frame, local);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, AFR_MSG_THIN_ARB,
+ "Failed to launch post-op on thin arbiter for gfid %s",
+ uuid_utoa(local->inode->gfid));
+ STACK_DESTROY(ta_frame->root);
+ goto err;
+ }
+
+ return ret;
+err:
+ afr_changelog_post_op_fail(local->transaction.frame, this, ENOMEM);
+ return ret;
+}
- if (local->fd) {
- gf_uuid_copy (gfid, local->fd->inode->gfid);
- file = uuid_utoa (gfid);
+static void
+afr_ta_set_fop_state(afr_private_t *priv, afr_local_t *local,
+ int *on_wire_count)
+{
+ LOCK(&priv->lock);
+ {
+ if (priv->release_ta_notify_dom_lock == _gf_true) {
+ /* Put the fop in waitq until notify dom lock is released.*/
+ local->fop_state = TA_WAIT_FOR_NOTIFY_LOCK_REL;
+ list_add_tail(&local->ta_waitq, &priv->ta_waitq);
+ } else if (priv->ta_bad_child_index == AFR_CHILD_UNKNOWN) {
+ /* Post-op on thin-arbiter to decide success/failure. */
+ local->fop_state = TA_GET_INFO_FROM_TA_FILE;
+ *on_wire_count = ++priv->ta_on_wire_txn_count;
+ if (*on_wire_count > 1) {
+ /*Avoid sending multiple on-wire post-ops on TA*/
+ list_add_tail(&local->ta_onwireq, &priv->ta_onwireq);
+ }
+ } else if (local->ta_failed_subvol == priv->ta_bad_child_index) {
+ /* Post-op on TA not needed as the fop failed on the in-memory bad
+ * brick. Just mark pending xattrs on the good data brick.*/
+ local->fop_state = TA_INFO_IN_MEMORY_SUCCESS;
+ priv->ta_in_mem_txn_count++;
} else {
- loc_path (&local->loc, local->loc.name);
- file = local->loc.path;
+ /* Post-op on TA not needed as the fop succeeded only on the
+ * in-memory bad data brick and not the good one. Fail the fop.*/
+ local->fop_state = TA_INFO_IN_MEMORY_FAILED;
+ priv->ta_in_mem_txn_count++;
}
+ }
+ UNLOCK(&priv->lock);
+}
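
afr_ta_set_fop_state() chooses one of four paths from the cached thin-arbiter state. A compact standalone sketch of that decision as a pure function; the enum and parameter names are invented for this sketch, and the real code additionally queues the fop and maintains counters under priv->lock:
```
#include <stdbool.h>
#include <stdio.h>

enum ta_state {
    WAIT_FOR_NOTIFY_LOCK_REL, /* park until the notify dom lock is released */
    GET_INFO_FROM_TA_FILE,    /* ask the thin-arbiter on the wire */
    INFO_IN_MEMORY_SUCCESS,   /* failed only on the known-bad brick */
    INFO_IN_MEMORY_FAILED,    /* failed on the good brick: fail the fop */
};

static enum ta_state
decide(bool release_pending, int bad_child, int failed_subvol)
{
    if (release_pending)
        return WAIT_FOR_NOTIFY_LOCK_REL;
    if (bad_child < 0)
        return GET_INFO_FROM_TA_FILE;
    if (failed_subvol == bad_child)
        return INFO_IN_MEMORY_SUCCESS;
    return INFO_IN_MEMORY_FAILED;
}

int
main(void)
{
    printf("%d %d %d %d\n", decide(true, -1, 0), decide(false, -1, 0),
           decide(false, 1, 1), decide(false, 1, 0));
    return 0;
}
```
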
- gf_msg (frame->this->name, GF_LOG_WARNING, 0, AFR_MSG_QUORUM_FAIL,
- "%s: Failing %s as quorum is not met",
- file, gf_fop_list[local->op]);
+static void
+afr_ta_fill_failed_subvol(afr_private_t *priv, afr_local_t *local)
+{
+ int i = 0;
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.pre_op[i])
- afr_transaction_fop_failed (frame, frame->this, i);
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.failed_subvols[i]) {
+ local->ta_failed_subvol = i;
+ break;
}
-
- local->op_ret = -1;
- local->op_errno = afr_quorum_errno (priv);
+ }
}
-int
-afr_changelog_post_op_now (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t * priv = this->private;
- int i = 0;
- int ret = 0;
- int idx = 0;
- afr_local_t * local = NULL;
- dict_t *xattr = NULL;
- int nothing_failed = 1;
- gf_boolean_t need_undirty = _gf_false;
-
- afr_handle_quorum (frame);
- local = frame->local;
- idx = afr_index_for_transaction_type (local->transaction.type);
-
- nothing_failed = afr_txn_nothing_failed (frame, this);
-
- if (afr_changelog_pre_op_uninherit (frame, this))
- need_undirty = _gf_false;
- else
- need_undirty = _gf_true;
-
- if (local->op_ret < 0) {
- afr_changelog_post_op_done (frame, this);
- goto out;
- }
+static void
+afr_post_op_handle_success(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
- if (nothing_failed && !need_undirty) {
- afr_changelog_post_op_done (frame, this);
- goto out;
- }
-
- xattr = dict_new ();
- if (!xattr) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- afr_changelog_post_op_done (frame, this);
- goto out;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.failed_subvols[i])
- local->pending[i][idx] = hton32(1);
- }
-
- ret = afr_set_pending_dict (priv, xattr, local->pending);
- if (ret < 0) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- afr_changelog_post_op_done (frame, this);
- goto out;
- }
-
- if (need_undirty)
- local->dirty[idx] = hton32(-1);
- else
- local->dirty[idx] = hton32(0);
-
- ret = dict_set_static_bin (xattr, AFR_DIRTY, local->dirty,
- sizeof(int) * AFR_NUM_CHANGE_LOGS);
- if (ret) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- afr_changelog_post_op_done (frame, this);
- goto out;
- }
-
- afr_changelog_do (frame, this, xattr, afr_changelog_post_op_done);
-out:
- if (xattr)
- dict_unref (xattr);
+ local = frame->local;
+ if (local->is_new_entry == _gf_true) {
+ afr_mark_new_entry_changelog(frame, this);
+ }
+ afr_changelog_post_op_do(frame, this);
- return 0;
+ return;
}
+static void
+afr_post_op_handle_failure(call_frame_t *frame, xlator_t *this, int op_errno)
+{
+ afr_changelog_post_op_fail(frame, this, op_errno);
-gf_boolean_t
-afr_changelog_pre_op_uninherit (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- fd_t *fd = NULL;
- int i = 0;
- gf_boolean_t ret = _gf_false;
- afr_fd_ctx_t *fd_ctx = NULL;
- int type = 0;
-
- local = frame->local;
- priv = this->private;
- fd = local->fd;
-
- type = afr_index_for_transaction_type (local->transaction.type);
- if (type != AFR_DATA_TRANSACTION)
- return !local->transaction.dirtied;
-
- if (!fd)
- return !local->transaction.dirtied;
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- return _gf_false;
-
- if (local->transaction.no_uninherit)
- return _gf_false;
-
- /* This function must be idempotent. So check if we
- were called before and return the same answer again.
-
- It is important to keep this function idempotent for
- the call in afr_changelog_post_op_safe() to not have
- side effects on the call from afr_changelog_post_op_now()
- */
- if (local->transaction.uninherit_done)
- return local->transaction.uninherit_value;
-
- LOCK(&fd->lock);
- {
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.pre_op[i] !=
- fd_ctx->pre_op_done[type][i]) {
- ret = !local->transaction.dirtied;
- goto unlock;
- }
- }
-
- if (fd_ctx->inherited[type]) {
- ret = _gf_true;
- fd_ctx->inherited[type]--;
- } else if (fd_ctx->on_disk[type]) {
- ret = _gf_false;
- fd_ctx->on_disk[type]--;
- } else {
- /* ASSERT */
- ret = _gf_false;
- }
-
- if (!fd_ctx->inherited[type] && !fd_ctx->on_disk[type]) {
- for (i = 0; i < priv->child_count; i++)
- fd_ctx->pre_op_done[type][i] = 0;
- }
- }
-unlock:
- UNLOCK(&fd->lock);
+ return;
+}
- local->transaction.uninherit_done = _gf_true;
- local->transaction.uninherit_value = ret;
+static void
+afr_ta_decide_post_op_state(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int on_wire_count = 0;
+
+ priv = this->private;
+ local = frame->local;
+
+ afr_ta_set_fop_state(priv, local, &on_wire_count);
+
+ switch (local->fop_state) {
+ case TA_GET_INFO_FROM_TA_FILE:
+ if (on_wire_count == 1)
+ afr_ta_post_op_synctask(this, local);
+ /*else, fop is queued in ta_onwireq.*/
+ break;
+ case TA_WAIT_FOR_NOTIFY_LOCK_REL:
+ /*Post releasing the notify lock, we will act on this queue*/
+ break;
+ case TA_INFO_IN_MEMORY_SUCCESS:
+ afr_post_op_handle_success(frame, this);
+ break;
+ case TA_INFO_IN_MEMORY_FAILED:
+ afr_post_op_handle_failure(frame, this, EIO);
+ break;
+ default:
+ break;
+ }
+ return;
+}
- return ret;
+static void
+afr_handle_failure_using_thin_arbiter(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = this->private;
+ afr_local_t *local = frame->local;
+
+ afr_ta_fill_failed_subvol(priv, local);
+ gf_msg_debug(this->name, 0,
+ "Fop failed on data brick (%s) for gfid=%s. "
+ "ta info needed to decide fop result.",
+ priv->children[local->ta_failed_subvol]->name,
+ uuid_utoa(local->inode->gfid));
+ afr_ta_decide_post_op_state(frame, this);
}
+void
+afr_changelog_post_op_do(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = this->private;
+ afr_local_t *local = NULL;
+ dict_t *xattr = NULL;
+ int i = 0;
+ int ret = 0;
+ int idx = 0;
+ int nothing_failed = 1;
+ gf_boolean_t need_undirty = _gf_false;
+
+ afr_handle_quorum(frame, this);
+ local = frame->local;
+ idx = afr_index_for_transaction_type(local->transaction.type);
+
+ xattr = dict_new();
+ if (!xattr) {
+ afr_changelog_post_op_fail(frame, this, ENOMEM);
+ goto out;
+ }
+
+ nothing_failed = afr_txn_nothing_failed(frame, this);
+
+ if (afr_changelog_pre_op_uninherit(frame, this))
+ need_undirty = _gf_false;
+ else
+ need_undirty = _gf_true;
+
+ if (local->op_ret < 0 && !nothing_failed) {
+ if (afr_need_dirty_marking(frame, this)) {
+ local->dirty[idx] = hton32(1);
+ goto set_dirty;
+ }
-gf_boolean_t
-afr_changelog_pre_op_inherit (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- fd_t *fd = NULL;
- int i = 0;
- gf_boolean_t ret = _gf_false;
- afr_fd_ctx_t *fd_ctx = NULL;
- int type = 0;
-
- local = frame->local;
- priv = this->private;
- fd = local->fd;
-
- if (local->transaction.type != AFR_DATA_TRANSACTION)
- return _gf_false;
-
- type = afr_index_for_transaction_type (local->transaction.type);
-
- if (!fd)
- return _gf_false;
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- return _gf_false;
-
- LOCK(&fd->lock);
- {
- if (!fd_ctx->on_disk[type]) {
- /* nothing to inherit yet */
- ret = _gf_false;
- goto unlock;
- }
-
- for (i = 0; i < priv->child_count; i++) {
- if (local->transaction.pre_op[i] !=
- fd_ctx->pre_op_done[type][i]) {
- /* either inherit exactly, or don't */
- ret = _gf_false;
- goto unlock;
- }
- }
-
- fd_ctx->inherited[type]++;
-
- ret = _gf_true;
-
- local->transaction.inherited = _gf_true;
- }
-unlock:
- UNLOCK(&fd->lock);
+ afr_changelog_post_op_done(frame, this);
+ goto out;
+ }
+
+ if (nothing_failed && !need_undirty) {
+ afr_changelog_post_op_done(frame, this);
+ goto out;
+ }
+
+ if (local->transaction.in_flight_sb) {
+ afr_changelog_post_op_fail(frame, this,
+ local->transaction.in_flight_sb_errno);
+ goto out;
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.failed_subvols[i])
+ local->pending[i][idx] = hton32(1);
+ }
+
+ ret = afr_set_pending_dict(priv, xattr, local->pending);
+ if (ret < 0) {
+ afr_changelog_post_op_fail(frame, this, ENOMEM);
+ goto out;
+ }
+
+ if (need_undirty)
+ local->dirty[idx] = hton32(-1);
+ else
+ local->dirty[idx] = hton32(0);
+
+set_dirty:
+ ret = dict_set_static_bin(xattr, AFR_DIRTY, local->dirty,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ if (ret) {
+ afr_changelog_post_op_fail(frame, this, ENOMEM);
+ goto out;
+ }
+
+ afr_changelog_do(frame, this, xattr, afr_changelog_post_op_done,
+ AFR_TRANSACTION_POST_OP);
+out:
+ if (xattr)
+ dict_unref(xattr);
- return ret;
+ return;
}
+static int
+afr_changelog_post_op_now(call_frame_t *frame, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
+ int failed_count = 0;
+
+ priv = this->private;
+ local = frame->local;
+
+ if (priv->thin_arbiter_count) {
+ failed_count = AFR_COUNT(local->transaction.failed_subvols,
+ priv->child_count);
+ if (failed_count == 1) {
+ afr_handle_failure_using_thin_arbiter(frame, this);
+ return 0;
+ } else {
+ /* Txn either succeeded or failed on both data bricks. Let
+ * post_op_do handle it as the case might be. */
+ }
+ }
-gf_boolean_t
-afr_changelog_pre_op_update (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- fd_t *fd = NULL;
- afr_fd_ctx_t *fd_ctx = NULL;
- int i = 0;
- gf_boolean_t ret = _gf_false;
- int type = 0;
-
- local = frame->local;
- priv = this->private;
- fd = local->fd;
-
- if (!fd)
- return _gf_false;
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- return _gf_false;
-
- if (local->transaction.inherited)
- /* was already inherited in afr_changelog_pre_op */
- return _gf_false;
-
- if (!local->transaction.dirtied)
- return _gf_false;
-
- if (!afr_txn_nothing_failed (frame, this))
- return _gf_false;
-
- type = afr_index_for_transaction_type (local->transaction.type);
-
- ret = _gf_false;
-
- LOCK(&fd->lock);
- {
- if (!fd_ctx->on_disk[type]) {
- for (i = 0; i < priv->child_count; i++)
- fd_ctx->pre_op_done[type][i] =
- local->transaction.pre_op[i];
- } else {
- for (i = 0; i < priv->child_count; i++)
- if (fd_ctx->pre_op_done[type][i] !=
- local->transaction.pre_op[i]) {
- local->transaction.no_uninherit = 1;
- goto unlock;
- }
- }
- fd_ctx->on_disk[type]++;
-
- ret = _gf_true;
- }
-unlock:
- UNLOCK(&fd->lock);
-
- return ret;
+ afr_changelog_post_op_do(frame, this);
+ return 0;
}
-
-int
-afr_changelog_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, dict_t *xattr, dict_t *xdata)
+gf_boolean_t
+afr_changelog_pre_op_uninherit(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int call_count = -1;
- int child_index = -1;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_inode_ctx_t *ctx = NULL;
+ int i = 0;
+ gf_boolean_t ret = _gf_false;
+ int type = 0;
+
+ local = frame->local;
+ priv = this->private;
+ ctx = local->inode_ctx;
+
+ type = afr_index_for_transaction_type(local->transaction.type);
+ if (type != AFR_DATA_TRANSACTION)
+ return !local->transaction.dirtied;
+
+ if (local->transaction.no_uninherit)
+ return _gf_false;
+
+ /* This function must be idempotent. So check if we
+ were called before and return the same answer again.
- local = frame->local;
- priv = this->private;
- child_index = (long) cookie;
+ It is important to keep this function idempotent for
+ the call in afr_changelog_post_op_safe() to not have
+ side effects on the call from afr_changelog_post_op_now()
+ */
+ if (local->transaction.uninherit_done)
+ return local->transaction.uninherit_value;
- if (op_ret == -1) {
- local->op_errno = op_errno;
- afr_transaction_fop_failed (frame, this, child_index);
+ LOCK(&local->inode->lock);
+ {
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.pre_op[i] != ctx->pre_op_done[type][i]) {
+ ret = !local->transaction.dirtied;
+ goto unlock;
+ }
}
- if (priv->arbiter_count == 1 && !op_ret) {
- if (xattr)
- local->transaction.pre_op_xdata[child_index] =
- dict_ref (xattr);
+ if (ctx->inherited[type]) {
+ ret = _gf_true;
+ ctx->inherited[type]--;
+ } else if (ctx->on_disk[type]) {
+ ret = _gf_false;
+ ctx->on_disk[type]--;
+ } else {
+ /* ASSERT */
+ ret = _gf_false;
}
- call_count = afr_frame_return (frame);
+ if (!ctx->inherited[type] && !ctx->on_disk[type]) {
+ for (i = 0; i < priv->child_count; i++)
+ ctx->pre_op_done[type][i] = 0;
+ }
+ }
+unlock:
+ UNLOCK(&local->inode->lock);
- if (call_count == 0)
- local->transaction.changelog_resume (frame, this);
+ local->transaction.uninherit_done = _gf_true;
+ local->transaction.uninherit_value = ret;
- return 0;
+ return ret;
}
-
-int
-afr_changelog_do (call_frame_t *frame, xlator_t *this, dict_t *xattr,
- afr_changelog_resume_t changelog_resume)
+gf_boolean_t
+afr_changelog_pre_op_inherit(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- int i = 0;
- int call_count = 0;
-
- local = frame->local;
- priv = this->private;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ gf_boolean_t ret = _gf_false;
+ int type = 0;
- call_count = afr_changelog_call_count (local->transaction.type,
- local->transaction.pre_op,
- priv->child_count);
+ local = frame->local;
+ priv = this->private;
- if (call_count == 0) {
- changelog_resume (frame, this);
- return 0;
- }
+ if (local->transaction.type != AFR_DATA_TRANSACTION)
+ return _gf_false;
- local->call_count = call_count;
+ type = afr_index_for_transaction_type(local->transaction.type);
- local->transaction.changelog_resume = changelog_resume;
+ LOCK(&local->inode->lock);
+ {
+ if (!local->inode_ctx->on_disk[type]) {
+ /* nothing to inherit yet */
+ ret = _gf_false;
+ goto unlock;
+ }
for (i = 0; i < priv->child_count; i++) {
- if (!local->transaction.pre_op[i])
- continue;
+ if (local->transaction.pre_op[i] !=
+ local->inode_ctx->pre_op_done[type][i]) {
+ /* either inherit exactly, or don't */
+ ret = _gf_false;
+ goto unlock;
+ }
+ }
- switch (local->transaction.type) {
- case AFR_DATA_TRANSACTION:
- case AFR_METADATA_TRANSACTION:
- if (!local->fd) {
- STACK_WIND_COOKIE (frame, afr_changelog_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->xattrop,
- &local->loc,
- GF_XATTROP_ADD_ARRAY, xattr,
- NULL);
- } else {
- STACK_WIND_COOKIE (frame, afr_changelog_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->fxattrop,
- local->fd,
- GF_XATTROP_ADD_ARRAY, xattr,
- NULL);
- }
- break;
- case AFR_ENTRY_RENAME_TRANSACTION:
-
- STACK_WIND_COOKIE (frame, afr_changelog_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->xattrop,
- &local->transaction.new_parent_loc,
- GF_XATTROP_ADD_ARRAY, xattr,
- NULL);
- call_count--;
+ local->inode_ctx->inherited[type]++;
- /* fall through */
+ ret = _gf_true;
- case AFR_ENTRY_TRANSACTION:
- if (local->fd)
- STACK_WIND_COOKIE (frame, afr_changelog_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->fxattrop,
- local->fd,
- GF_XATTROP_ADD_ARRAY, xattr,
- NULL);
- else
- STACK_WIND_COOKIE (frame, afr_changelog_cbk,
- (void *) (long) i,
- priv->children[i],
- priv->children[i]->fops->xattrop,
- &local->transaction.parent_loc,
- GF_XATTROP_ADD_ARRAY, xattr,
- NULL);
- break;
- }
-
- if (!--call_count)
- break;
- }
+ local->transaction.inherited = _gf_true;
+ }
+unlock:
+ UNLOCK(&local->inode->lock);
- return 0;
+ return ret;
}
+gf_boolean_t
+afr_changelog_pre_op_update(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+ gf_boolean_t ret = _gf_false;
+ int type = 0;
-int
-afr_changelog_pre_op (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t * priv = this->private;
- int i = 0;
- int ret = 0;
- int call_count = 0;
- int op_errno = 0;
- afr_local_t *local = NULL;
- afr_internal_lock_t *int_lock = NULL;
- unsigned char *locked_nodes = NULL;
- int idx = -1;
- gf_boolean_t pre_nop = _gf_true;
- dict_t *xdata_req = NULL;
-
- local = frame->local;
- int_lock = &local->internal_lock;
- idx = afr_index_for_transaction_type (local->transaction.type);
-
- locked_nodes = afr_locked_nodes_get (local->transaction.type, int_lock);
-
- for (i = 0; i < priv->child_count; i++) {
- if (locked_nodes[i]) {
- local->transaction.pre_op[i] = 1;
- call_count++;
- } else {
- local->transaction.failed_subvols[i] = 1;
- }
- }
-
- /* This condition should not be met with present code, as
- * transaction.done will be called if locks are not acquired on even a
- * single node.
- */
- if (call_count == 0) {
- op_errno = ENOTCONN;
- goto err;
- }
-
- /* Check if the fop can be performed on at least
- * quorum number of nodes.
- */
- if (priv->quorum_count && !afr_has_fop_quorum (frame)) {
- op_errno = afr_quorum_errno (priv);
- goto err;
- }
+ local = frame->local;
+ priv = this->private;
- xdata_req = dict_new();
- if (!xdata_req) {
- op_errno = ENOMEM;
- goto err;
- }
+ if (local->transaction.type == AFR_ENTRY_TRANSACTION ||
+ local->transaction.type == AFR_ENTRY_RENAME_TRANSACTION)
+ return _gf_false;
- if (afr_changelog_pre_op_inherit (frame, this))
- goto next;
+ if (local->transaction.inherited)
+ /* was already inherited in afr_changelog_pre_op */
+ return _gf_false;
- if (call_count < priv->child_count)
- pre_nop = _gf_false;
+ if (!local->transaction.dirtied)
+ return _gf_false;
- /* Set an all-zero pending changelog so that in the cbk, we can get the
- * current on-disk values. In a replica 3 volume with arbiter enabled,
- * these values are needed to arrive at a go/ no-go of the fop phase to
- * avoid ending up in split-brain.*/
+ if (!afr_txn_nothing_failed(frame, this))
+ return _gf_false;
- ret = afr_set_pending_dict (priv, xdata_req, local->pending);
- if (ret < 0) {
- op_errno = ENOMEM;
- goto err;
- }
+ type = afr_index_for_transaction_type(local->transaction.type);
- if ((local->transaction.type == AFR_DATA_TRANSACTION ||
- !local->optimistic_change_log)) {
+ ret = _gf_false;
- local->dirty[idx] = hton32(1);
+ LOCK(&local->inode->lock);
+ {
+ if (!local->inode_ctx->on_disk[type]) {
+ for (i = 0; i < priv->child_count; i++)
+ local->inode_ctx->pre_op_done[type][i] =
+ (!local->transaction.failed_subvols[i]);
+ } else {
+ for (i = 0; i < priv->child_count; i++)
+ if (local->inode_ctx->pre_op_done[type][i] !=
+ (!local->transaction.failed_subvols[i])) {
+ local->transaction.no_uninherit = 1;
+ goto unlock;
+ }
+ }
+ local->inode_ctx->on_disk[type]++;
- ret = dict_set_static_bin (xdata_req, AFR_DIRTY, local->dirty,
- sizeof(int) * AFR_NUM_CHANGE_LOGS);
- if (ret) {
- op_errno = ENOMEM;
- goto err;
- }
+ ret = _gf_true;
+ }
+unlock:
+ UNLOCK(&local->inode->lock);
- pre_nop = _gf_false;
- local->transaction.dirtied = 1;
- }
+ return ret;
+}
- if (pre_nop)
- goto next;
+int
+afr_changelog_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, dict_t *xattr, dict_t *xdata)
+{
+ afr_local_t *local = NULL;
+ int call_count = -1;
+ int child_index = -1;
- if (!local->pre_op_compat) {
- dict_copy (xdata_req, local->xdata_req);
- goto next;
- }
+ local = frame->local;
+ child_index = (long)cookie;
- afr_changelog_do (frame, this, xdata_req, afr_transaction_perform_fop);
+ if (op_ret == -1) {
+ local->op_errno = op_errno;
+ afr_transaction_fop_failed(frame, this, child_index);
+ }
- if (xdata_req)
- dict_unref (xdata_req);
+ if (xattr)
+ local->transaction.changelog_xdata[child_index] = dict_ref(xattr);
- return 0;
-next:
- afr_transaction_perform_fop (frame, this);
+ call_count = afr_frame_return(frame);
- if (xdata_req)
- dict_unref (xdata_req);
+ if (call_count == 0) {
+ local->transaction.changelog_resume(frame, this);
+ }
- return 0;
-err:
- local->internal_lock.lock_cbk = local->transaction.done;
- local->op_ret = -1;
- local->op_errno = op_errno;
+ return 0;
+}
- afr_unlock (frame, this);
+void
+afr_changelog_populate_xdata(call_frame_t *frame, afr_xattrop_type_t op,
+ dict_t **xdata, dict_t **newloc_xdata)
+{
+ int i = 0;
+ int ret = 0;
+ char *key = NULL;
+ int keylen = 0;
+ const char *name = NULL;
+ dict_t *xdata1 = NULL;
+ dict_t *xdata2 = NULL;
+ xlator_t *this = NULL;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ gf_boolean_t need_entry_key_set = _gf_true;
+
+ local = frame->local;
+ this = THIS;
+ priv = this->private;
+
+ if (local->transaction.type == AFR_DATA_TRANSACTION ||
+ local->transaction.type == AFR_METADATA_TRANSACTION)
+ goto out;
+
+ if (!priv->esh_granular)
+ goto out;
+
+ xdata1 = dict_new();
+ if (!xdata1)
+ goto out;
+
+ name = local->loc.name;
+ if (local->op == GF_FOP_LINK)
+ name = local->newloc.name;
+
+ switch (op) {
+ case AFR_TRANSACTION_PRE_OP:
+ key = GF_XATTROP_ENTRY_IN_KEY;
+ break;
+ case AFR_TRANSACTION_POST_OP:
+ if (afr_txn_nothing_failed(frame, this)) {
+ key = GF_XATTROP_ENTRY_OUT_KEY;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.failed_subvols[i])
+ continue;
+ need_entry_key_set = _gf_false;
+ break;
+ }
+ /* If the transaction itself did not fail and there
+ * are no failed subvolumes, check whether the fop
+ * failed due to a symmetric error. If it did, do
+ * not set the ENTRY_OUT xattr which would end up
+ * deleting a name index which was created possibly by
+ * an earlier entry txn that may have failed on some
+ * of the sub-volumes.
+ */
+ if (local->op_ret)
+ need_entry_key_set = _gf_false;
+ } else {
+ key = GF_XATTROP_ENTRY_IN_KEY;
+ }
+ break;
+ }
+
+ if (need_entry_key_set) {
+ keylen = strlen(key);
+ ret = dict_set_strn(xdata1, key, keylen, (char *)name);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, AFR_MSG_DICT_SET_FAILED,
+ "%s/%s: Could not set %s key during xattrop",
+ uuid_utoa(local->loc.pargfid), local->loc.name, key);
+ if (local->transaction.type == AFR_ENTRY_RENAME_TRANSACTION) {
+ xdata2 = dict_new();
+ if (!xdata2)
+ goto out;
- if (xdata_req)
- dict_unref (xdata_req);
+ ret = dict_set_strn(xdata2, key, keylen,
+ (char *)local->newloc.name);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, AFR_MSG_DICT_SET_FAILED,
+ "%s/%s: Could not set %s key during "
+ "xattrop",
+ uuid_utoa(local->newloc.pargfid), local->newloc.name,
+ key);
+ }
+ }
- return 0;
+ *xdata = xdata1;
+ *newloc_xdata = xdata2;
+ xdata1 = xdata2 = NULL;
+out:
+ if (xdata1)
+ dict_unref(xdata1);
+ return;
}
-
int
-afr_post_blocking_inodelk_cbk (call_frame_t *frame, xlator_t *this)
+afr_changelog_prepare(xlator_t *this, call_frame_t *frame, int *call_count,
+ afr_changelog_resume_t changelog_resume,
+ afr_xattrop_type_t op, dict_t **xdata,
+ dict_t **newloc_xdata)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
- int_lock = &local->internal_lock;
+ local = frame->local;
+ priv = this->private;
- if (int_lock->lock_op_ret < 0) {
- gf_msg (this->name, GF_LOG_INFO,
- 0, AFR_MSG_BLOCKING_LKS_FAILED,
- "Blocking inodelks failed.");
- local->transaction.done (frame, this);
- } else {
+ *call_count = afr_changelog_call_count(
+ local->transaction.type, local->transaction.pre_op,
+ local->transaction.failed_subvols, priv->child_count);
- gf_msg_debug (this->name, 0,
- "Blocking inodelks done. Proceeding to FOP");
- afr_internal_lock_finish (frame, this);
- }
+ if (*call_count == 0) {
+ changelog_resume(frame, this);
+ return -1;
+ }
- return 0;
-}
+ afr_changelog_populate_xdata(frame, op, xdata, newloc_xdata);
+ local->call_count = *call_count;
+ local->transaction.changelog_resume = changelog_resume;
+ return 0;
+}
int
-afr_post_nonblocking_inodelk_cbk (call_frame_t *frame, xlator_t *this)
+afr_changelog_do(call_frame_t *frame, xlator_t *this, dict_t *xattr,
+ afr_changelog_resume_t changelog_resume, afr_xattrop_type_t op)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
-
- local = frame->local;
- int_lock = &local->internal_lock;
-
- /* Initiate blocking locks if non-blocking has failed */
- if (int_lock->lock_op_ret < 0) {
- gf_msg_debug (this->name, 0,
- "Non blocking inodelks failed. Proceeding to blocking");
- int_lock->lock_cbk = afr_post_blocking_inodelk_cbk;
- afr_blocking_lock (frame, this);
- } else {
-
- gf_msg_debug (this->name, 0,
- "Non blocking inodelks done. Proceeding to FOP");
- afr_internal_lock_finish (frame, this);
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ dict_t *xdata = NULL;
+ dict_t *newloc_xdata = NULL;
+ int i = 0;
+ int call_count = 0;
+ int ret = 0;
+
+ local = frame->local;
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (local->transaction.changelog_xdata[i]) {
+ dict_unref(local->transaction.changelog_xdata[i]);
+ local->transaction.changelog_xdata[i] = NULL;
}
+ }
+
+ ret = afr_changelog_prepare(this, frame, &call_count, changelog_resume, op,
+ &xdata, &newloc_xdata);
+ if (ret)
return 0;
-}
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.pre_op[i] ||
+ local->transaction.failed_subvols[i])
+ continue;
-int
-afr_post_blocking_entrylk_cbk (call_frame_t *frame, xlator_t *this)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
+ switch (local->transaction.type) {
+ case AFR_DATA_TRANSACTION:
+ case AFR_METADATA_TRANSACTION:
+ if (!local->fd) {
+ STACK_WIND_COOKIE(
+ frame, afr_changelog_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->xattrop,
+ &local->loc, GF_XATTROP_ADD_ARRAY, xattr, xdata);
+ } else {
+ STACK_WIND_COOKIE(
+ frame, afr_changelog_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->fxattrop,
+ local->fd, GF_XATTROP_ADD_ARRAY, xattr, xdata);
+ }
+ break;
+ case AFR_ENTRY_RENAME_TRANSACTION:
- local = frame->local;
- int_lock = &local->internal_lock;
+ STACK_WIND_COOKIE(frame, afr_changelog_cbk, (void *)(long)i,
+ priv->children[i],
+ priv->children[i]->fops->xattrop,
+ &local->transaction.new_parent_loc,
+ GF_XATTROP_ADD_ARRAY, xattr, newloc_xdata);
+ call_count--;
- if (int_lock->lock_op_ret < 0) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_BLOCKING_LKS_FAILED,
- "Blocking entrylks failed.");
- local->transaction.done (frame, this);
- } else {
+ /* fall through */
- gf_msg_debug (this->name, 0,
- "Blocking entrylks done. Proceeding to FOP");
- afr_internal_lock_finish (frame, this);
+ case AFR_ENTRY_TRANSACTION:
+ if (local->fd)
+ STACK_WIND_COOKIE(
+ frame, afr_changelog_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->fxattrop,
+ local->fd, GF_XATTROP_ADD_ARRAY, xattr, xdata);
+ else
+ STACK_WIND_COOKIE(frame, afr_changelog_cbk, (void *)(long)i,
+ priv->children[i],
+ priv->children[i]->fops->xattrop,
+ &local->transaction.parent_loc,
+ GF_XATTROP_ADD_ARRAY, xattr, xdata);
+ break;
}
- return 0;
+ if (!--call_count)
+ break;
+ }
+
+ if (xdata)
+ dict_unref(xdata);
+ if (newloc_xdata)
+ dict_unref(newloc_xdata);
+ return 0;
}
+static void
+afr_init_optimistic_changelog_for_txn(xlator_t *this, afr_local_t *local)
+{
+ int locked_count = 0;
+ afr_private_t *priv = NULL;
+
+ priv = this->private;
+
+ locked_count = AFR_COUNT(local->transaction.pre_op, priv->child_count);
+ if (priv->optimistic_change_log && locked_count == priv->child_count)
+ local->optimistic_change_log = 1;
+
+ return;
+}
int
-afr_post_nonblocking_entrylk_cbk (call_frame_t *frame, xlator_t *this)
+afr_changelog_pre_op(call_frame_t *frame, xlator_t *this)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
+ afr_private_t *priv = this->private;
+ int i = 0;
+ int ret = 0;
+ int call_count = 0;
+ int op_errno = 0;
+ afr_local_t *local = NULL;
+ afr_internal_lock_t *int_lock = NULL;
+ unsigned char *locked_nodes = NULL;
+ int idx = -1;
+ gf_boolean_t pre_nop = _gf_true;
+ dict_t *xdata_req = NULL;
+
+ local = frame->local;
+ int_lock = &local->internal_lock;
+ idx = afr_index_for_transaction_type(local->transaction.type);
+
+ locked_nodes = afr_locked_nodes_get(local->transaction.type, int_lock);
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (locked_nodes[i]) {
+ local->transaction.pre_op[i] = 1;
+ call_count++;
+ } else {
+ local->transaction.failed_subvols[i] = 1;
+ }
+ }
+
+ afr_init_optimistic_changelog_for_txn(this, local);
+
+ if (afr_changelog_pre_op_inherit(frame, this))
+ goto next;
+
+ /* This condition should not be met with present code, as
+ * transaction.done will be called if locks are not acquired on even a
+ * single node.
+ */
+ if (call_count == 0) {
+ op_errno = ENOTCONN;
+ goto err;
+ }
+
+ /* Check if the fop can be performed on at least
+ * quorum number of nodes.
+ */
+ if (priv->quorum_count && !afr_has_fop_quorum(frame)) {
+ op_errno = int_lock->lock_op_errno;
+ if (op_errno == 0)
+ op_errno = afr_quorum_errno(priv);
+ goto err;
+ }
+
+ xdata_req = dict_new();
+ if (!xdata_req) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ if (call_count < priv->child_count)
+ pre_nop = _gf_false;
+
+ /* Set an all-zero pending changelog so that in the cbk, we can get the
+ * current on-disk values. In a replica 3 volume with arbiter enabled,
+ * these values are needed to arrive at a go/no-go of the fop phase to
+ * avoid ending up in split-brain.*/
+
+ ret = afr_set_pending_dict(priv, xdata_req, local->pending);
+ if (ret < 0) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ if (afr_needs_changelog_update(local)) {
+ local->dirty[idx] = hton32(1);
+
+ ret = dict_set_static_bin(xdata_req, AFR_DIRTY, local->dirty,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ if (ret) {
+ op_errno = ENOMEM;
+ goto err;
+ }
- local = frame->local;
- int_lock = &local->internal_lock;
+ pre_nop = _gf_false;
+ local->transaction.dirtied = 1;
+ }
- /* Initiate blocking locks if non-blocking has failed */
- if (int_lock->lock_op_ret < 0) {
- gf_msg_debug (this->name, 0,
- "Non blocking entrylks failed. Proceeding to blocking");
- int_lock->lock_cbk = afr_post_blocking_entrylk_cbk;
- afr_blocking_lock (frame, this);
- } else {
+ if (pre_nop)
+ goto next;
- gf_msg_debug (this->name, 0,
- "Non blocking entrylks done. Proceeding to FOP");
+ if (!local->pre_op_compat) {
+ dict_copy(xdata_req, local->xdata_req);
+ goto next;
+ }
- afr_internal_lock_finish (frame, this);
- }
+ afr_changelog_do(frame, this, xdata_req, afr_transaction_perform_fop,
+ AFR_TRANSACTION_PRE_OP);
- return 0;
-}
+ if (xdata_req)
+ dict_unref(xdata_req);
+ return 0;
+next:
+ afr_transaction_perform_fop(frame, this);
-int
-afr_post_blocking_rename_cbk (call_frame_t *frame, xlator_t *this)
-{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
+ if (xdata_req)
+ dict_unref(xdata_req);
- local = frame->local;
- int_lock = &local->internal_lock;
+ return 0;
+err:
+ local->internal_lock.lock_cbk = afr_transaction_done;
+ local->op_ret = -1;
+ local->op_errno = op_errno;
- if (int_lock->lock_op_ret < 0) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- AFR_MSG_BLOCKING_LKS_FAILED,
- "Blocking entrylks failed.");
+ afr_handle_lock_acquire_failure(local);
- local->transaction.done (frame, this);
- } else {
+ if (xdata_req)
+ dict_unref(xdata_req);
- gf_msg_debug (this->name, 0,
- "Blocking entrylks done. Proceeding to FOP");
+ return 0;
+}
- afr_internal_lock_finish (frame, this);
- }
- return 0;
+int
+afr_post_nonblocking_lock_cbk(call_frame_t *frame, xlator_t *this)
+{
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+
+ local = frame->local;
+ int_lock = &local->internal_lock;
+
+ /* Initiate blocking locks if non-blocking has failed */
+ if (int_lock->lock_op_ret < 0) {
+ gf_msg_debug(this->name, 0,
+ "Non blocking locks failed. Proceeding to blocking");
+ int_lock->lock_cbk = afr_internal_lock_finish;
+ afr_blocking_lock(frame, this);
+ } else {
+ gf_msg_debug(this->name, 0,
+ "Non blocking locks done. Proceeding to FOP");
+
+ afr_internal_lock_finish(frame, this);
+ }
+
+ return 0;
}
int
-afr_post_lower_unlock_cbk (call_frame_t *frame, xlator_t *this)
+afr_post_blocking_rename_cbk(call_frame_t *frame, xlator_t *this)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
- int_lock = &local->internal_lock;
+ local = frame->local;
+ int_lock = &local->internal_lock;
- GF_ASSERT (!int_lock->higher_locked);
+ if (int_lock->lock_op_ret < 0) {
+ gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_INTERNAL_LKS_FAILED,
+ "Blocking entrylks failed.");
- int_lock->lock_cbk = afr_post_blocking_rename_cbk;
- afr_blocking_lock (frame, this);
+ afr_transaction_done(frame, this);
+ } else {
+ gf_msg_debug(this->name, 0,
+ "Blocking entrylks done. Proceeding to FOP");
- return 0;
+ afr_internal_lock_finish(frame, this);
+ }
+ return 0;
}
-
int
-afr_set_transaction_flock (afr_local_t *local)
+afr_post_lower_unlock_cbk(call_frame_t *frame, xlator_t *this)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_inodelk_t *inodelk = NULL;
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
- int_lock = &local->internal_lock;
- inodelk = afr_get_inodelk (int_lock, int_lock->domain);
+ local = frame->local;
+ int_lock = &local->internal_lock;
- inodelk->flock.l_len = local->transaction.len;
- inodelk->flock.l_start = local->transaction.start;
- inodelk->flock.l_type = F_WRLCK;
+ GF_ASSERT(!int_lock->higher_locked);
- return 0;
+ int_lock->lock_cbk = afr_post_blocking_rename_cbk;
+ afr_blocking_lock(frame, this);
+
+ return 0;
}
int
-afr_lock_rec (call_frame_t *frame, xlator_t *this)
+afr_set_transaction_flock(xlator_t *this, afr_local_t *local,
+ afr_lockee_t *lockee)
{
- afr_internal_lock_t *int_lock = NULL;
- afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ struct gf_flock *flock = NULL;
+
+ priv = this->private;
+ flock = &lockee->flock;
+
+ if ((priv->arbiter_count || local->transaction.eager_lock_on ||
+ priv->full_lock) &&
+ local->transaction.type == AFR_DATA_TRANSACTION) {
+ /* Lock the entire file to avoid network split-brains. */
+ flock->l_len = 0;
+ flock->l_start = 0;
+ } else {
+ flock->l_len = local->transaction.len;
+ flock->l_start = local->transaction.start;
+ }
+ flock->l_type = F_WRLCK;
+
+ return 0;
+}
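
The full-file branch above relies on the record-lock convention that a length of zero means "up to end of file"; the same idea expressed with the standard POSIX struct flock (gf_flock uses the equivalent fields), as a self-contained illustration:

```
#include <fcntl.h>
#include <unistd.h>

/* start = 0, len = 0 describes a whole-file write lock: l_len == 0 extends
 * the range to end of file, so any offset a concurrent write picks is
 * covered -- which is what the arbiter/eager-lock/full-lock cases want. */
static const struct flock whole_file_write_lock = {
    .l_type = F_WRLCK,
    .l_whence = SEEK_SET,
    .l_start = 0,
    .l_len = 0, /* 0 => till EOF */
};
```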
- local = frame->local;
- int_lock = &local->internal_lock;
+int
+afr_lock(call_frame_t *frame, xlator_t *this)
+{
+ afr_internal_lock_t *int_lock = NULL;
+ afr_local_t *local = NULL;
+ int i = 0;
- int_lock->transaction_lk_type = AFR_TRANSACTION_LK;
- int_lock->domain = this->name;
+ local = frame->local;
+ int_lock = &local->internal_lock;
- switch (local->transaction.type) {
+ int_lock->lock_cbk = afr_post_nonblocking_lock_cbk;
+ int_lock->domain = this->name;
+
+ switch (local->transaction.type) {
case AFR_DATA_TRANSACTION:
case AFR_METADATA_TRANSACTION:
- afr_set_transaction_flock (local);
-
- int_lock->lock_cbk = afr_post_nonblocking_inodelk_cbk;
+ for (i = 0; i < int_lock->lockee_count; i++) {
+ afr_set_transaction_flock(this, local, &int_lock->lockee[i]);
+ }
- afr_nonblocking_inodelk (frame, this);
- break;
-
- case AFR_ENTRY_RENAME_TRANSACTION:
-
- int_lock->lock_cbk = afr_post_nonblocking_entrylk_cbk;
- afr_nonblocking_entrylk (frame, this);
- break;
+ break;
case AFR_ENTRY_TRANSACTION:
- int_lock->lk_basename = local->transaction.basename;
- if (local->transaction.parent_loc.path)
- int_lock->lk_loc = &local->transaction.parent_loc;
- else
- GF_ASSERT (local->fd);
-
- int_lock->lock_cbk = afr_post_nonblocking_entrylk_cbk;
- afr_nonblocking_entrylk (frame, this);
- break;
- }
+ int_lock->lk_basename = local->transaction.basename;
+ if (local->transaction.parent_loc.path)
+ int_lock->lk_loc = &local->transaction.parent_loc;
+ else
+ GF_ASSERT(local->fd);
+ break;
+ case AFR_ENTRY_RENAME_TRANSACTION:
+ break;
+ }
+ afr_lock_nonblocking(frame, this);
- return 0;
+ return 0;
}
-
-int
-afr_lock (call_frame_t *frame, xlator_t *this)
+static gf_boolean_t
+afr_locals_overlap(afr_local_t *local1, afr_local_t *local2)
{
- afr_set_lock_number (frame, this);
-
- return afr_lock_rec (frame, this);
+ uint64_t start1 = local1->transaction.start;
+ uint64_t start2 = local2->transaction.start;
+ uint64_t end1 = 0;
+ uint64_t end2 = 0;
+
+ if (local1->transaction.len)
+ end1 = start1 + local1->transaction.len - 1;
+ else
+ end1 = ULLONG_MAX;
+
+ if (local2->transaction.len)
+ end2 = start2 + local2->transaction.len - 1;
+ else
+ end2 = ULLONG_MAX;
+
+ return ((end1 >= start2) && (end2 >= start1));
}
-
-/* }}} */
-
-int
-afr_internal_lock_finish (call_frame_t *frame, xlator_t *this)
+gf_boolean_t
+afr_has_lock_conflict(afr_local_t *local, gf_boolean_t waitlist_check)
{
- if (__fop_changelog_needed (frame, this)) {
- afr_changelog_pre_op (frame, this);
- } else {
- afr_transaction_perform_fop (frame, this);
+ afr_local_t *each = NULL;
+ afr_lock_t *lock = NULL;
+
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ /*
+ * Once the full-file lock is acquired in the eager-lock phase, overlapping
+ * writes do not compete for inode-locks; instead the lock is transferred to
+ * the next writes. Because of this, overlapping writes are not ordered.
+ * This can cause inconsistencies in replication.
+ * Example:
+ * Two overlapping writes w1, w2 are sent in parallel on same fd
+ * in two threads t1, t2.
+ * Both threads can execute afr_writev_wind in the following manner.
+ * t1 winds w1 on brick-0
+ * t2 winds w2 on brick-0
+ * t2 winds w2 on brick-1
+ * t1 winds w1 on brick-1
+ *
+ * This check makes sure the locks are not transferred for
+ * overlapping writes.
+ */
+ list_for_each_entry(each, &lock->owners, transaction.owner_list)
+ {
+ if (afr_locals_overlap(each, local)) {
+ return _gf_true;
}
+ }
- return 0;
+ if (!waitlist_check)
+ return _gf_false;
+ list_for_each_entry(each, &lock->waiting, transaction.wait_list)
+ {
+ if (afr_locals_overlap(each, local)) {
+ return _gf_true;
+ }
+ }
+ return _gf_false;
}
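
The conflict check above boils down to the interval test in afr_locals_overlap(), where a zero length means "to end of file"; a standalone restatement with a worked example (the helper name is hypothetical):

```
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

/* Same test as afr_locals_overlap(): len == 0 extends the range to end of
 * file, so its effective end becomes ULLONG_MAX. */
static bool
ranges_overlap(uint64_t start1, uint64_t len1, uint64_t start2, uint64_t len2)
{
    uint64_t end1 = len1 ? start1 + len1 - 1 : ULLONG_MAX;
    uint64_t end2 = len2 ? start2 + len2 - 1 : ULLONG_MAX;

    return (end1 >= start2) && (end2 >= start1);
}

/* A 4KB write at offset 0 and a 4KB write at offset 2048 overlap, so the
 * second must wait rather than share the eager lock:
 *   ranges_overlap(0, 4096, 2048, 4096) -> true
 *   ranges_overlap(0, 4096, 8192, 4096) -> false
 */
```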
+/* }}} */
+static void
+afr_copy_inodelk_vars(afr_internal_lock_t *dst, afr_internal_lock_t *src,
+ xlator_t *this, int lockee_num)
+{
+ afr_private_t *priv = this->private;
+ afr_lockee_t *sl = &src->lockee[lockee_num];
+ afr_lockee_t *dl = &dst->lockee[lockee_num];
+
+ dst->domain = src->domain;
+ dl->flock.l_len = sl->flock.l_len;
+ dl->flock.l_start = sl->flock.l_start;
+ dl->flock.l_type = sl->flock.l_type;
+ dl->locked_count = sl->locked_count;
+ memcpy(dl->locked_nodes, sl->locked_nodes,
+ priv->child_count * sizeof(*dl->locked_nodes));
+}
void
-afr_set_delayed_post_op (call_frame_t *frame, xlator_t *this)
+__afr_transaction_wake_shared(afr_local_t *local, struct list_head *shared)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- /* call this function from any of the related optimizations
- which benefit from delaying post op are enabled, namely:
-
- - changelog piggybacking
- - eager locking
- */
-
- priv = this->private;
- if (!priv)
- return;
-
- if (!priv->post_op_delay_secs)
- return;
-
- local = frame->local;
- if (!local)
- return;
+ gf_boolean_t conflict = _gf_false;
+ afr_local_t *each = NULL;
+ afr_lock_t *lock = &local->inode_ctx->lock[local->transaction.type];
+
+ while (!conflict) {
+ if (list_empty(&lock->waiting))
+ return;
+ each = list_entry(lock->waiting.next, afr_local_t,
+ transaction.wait_list);
+ if (afr_has_lock_conflict(each, _gf_false)) {
+ conflict = _gf_true;
+ }
+ if (conflict && !list_empty(&lock->owners))
+ return;
+ afr_copy_inodelk_vars(&each->internal_lock, &local->internal_lock,
+ each->transaction.frame->this, 0);
+ list_move_tail(&each->transaction.wait_list, shared);
+ list_add_tail(&each->transaction.owner_list, &lock->owners);
+ }
+}
- if (!local->transaction.eager_lock_on)
- return;
+static void
+afr_lock_resume_shared(struct list_head *list)
+{
+ afr_local_t *each = NULL;
+
+ while (!list_empty(list)) {
+ each = list_entry(list->next, afr_local_t, transaction.wait_list);
+ list_del_init(&each->transaction.wait_list);
+ afr_changelog_pre_op(each->transaction.frame,
+ each->transaction.frame->this);
+ }
+}
- if (!local->fd)
- return;
+int
+afr_internal_lock_finish(call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = frame->local;
+ afr_lock_t *lock = NULL;
+
+ local->internal_lock.lock_cbk = NULL;
+ if (!local->transaction.eager_lock_on) {
+ if (local->internal_lock.lock_op_ret < 0) {
+ afr_transaction_done(frame, this);
+ return 0;
+ }
+ afr_changelog_pre_op(frame, this);
+ } else {
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ if (local->internal_lock.lock_op_ret < 0) {
+ afr_handle_lock_acquire_failure(local);
+ } else {
+ lock->event_generation = local->event_generation;
+ afr_changelog_pre_op(frame, this);
+ }
+ }
- if (local->op == GF_FOP_WRITE)
- local->delayed_post_op = _gf_true;
+ return 0;
}
gf_boolean_t
-afr_are_multiple_fds_opened (fd_t *fd, xlator_t *this)
+afr_are_conflicting_ops_waiting(afr_local_t *local, xlator_t *this)
{
- afr_fd_ctx_t *fd_ctx = NULL;
-
- if (!fd) {
- /* If false is returned, it may keep on taking eager-lock
- * which may lead to starvation, so return true to avoid that.
- */
- gf_msg_callingfn (this->name, GF_LOG_ERROR, EBADF,
- AFR_MSG_INVALID_ARG, "Invalid fd");
- return _gf_true;
+ afr_lock_t *lock = NULL;
+ lock = &local->inode_ctx->lock[local->transaction.type];
+
+ /* Let's say mount1 holds the eager-lock (full lock) and, after it is
+ * taken, mount2 opens the same file: mount2 won't be able to
+ * perform any {meta,}data operations until mount1 releases the eager-lock.
+ * To avoid such a scenario, do not enable eager-lock for this transaction
+ * if open-fd-count > 1 for metadata transactions or num-inodelks > 1
+ * for data transactions.
+ */
+
+ if (local->transaction.type == AFR_METADATA_TRANSACTION) {
+ if (local->inode_ctx->open_fd_count > 1) {
+ return _gf_true;
}
- /* Lets say mount1 has eager-lock(full-lock) and after the eager-lock
- * is taken mount2 opened the same file, it won't be able to
- * perform any data operations until mount1 releases eager-lock.
- * To avoid such scenario do not enable eager-lock for this transaction
- * if open-fd-count is > 1
- */
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- return _gf_true;
-
- if (fd_ctx->open_fd_count > 1)
- return _gf_true;
+ } else if (local->transaction.type == AFR_DATA_TRANSACTION) {
+ if (lock->num_inodelks > 1) {
+ return _gf_true;
+ }
+ }
- return _gf_false;
+ return _gf_false;
}
-
gf_boolean_t
-is_afr_delayed_changelog_post_op_needed (call_frame_t *frame, xlator_t *this)
+afr_is_delayed_changelog_post_op_needed(call_frame_t *frame, xlator_t *this,
+ int delay)
{
- afr_local_t *local = NULL;
- gf_boolean_t res = _gf_false;
-
- local = frame->local;
- if (!local)
- goto out;
-
- if (!local->delayed_post_op)
- goto out;
-
- //Mark pending changelog ASAP
- if (!afr_txn_nothing_failed (frame, this))
- goto out;
-
- if (local->fd && afr_are_multiple_fds_opened (local->fd, this))
- goto out;
-
- res = _gf_true;
+ afr_local_t *local = NULL;
+ afr_lock_t *lock = NULL;
+ gf_boolean_t res = _gf_false;
+
+ local = frame->local;
+ lock = &local->inode_ctx->lock[local->transaction.type];
+
+ if (!afr_txn_nothing_failed(frame, this)) {
+ lock->release = _gf_true;
+ goto out;
+ }
+
+ if (afr_are_conflicting_ops_waiting(local, this)) {
+ lock->release = _gf_true;
+ goto out;
+ }
+
+ if (!list_empty(&lock->owners))
+ goto out;
+ else
+ GF_ASSERT(list_empty(&lock->waiting));
+
+ if (lock->release) {
+ goto out;
+ }
+
+ if (!delay) {
+ goto out;
+ }
+
+ if (local->transaction.disable_delayed_post_op) {
+ goto out;
+ }
+
+ if ((local->op != GF_FOP_WRITE) && (local->op != GF_FOP_FXATTROP) &&
+ (local->op != GF_FOP_FSYNC)) {
+ /* Only allow writes/fsyncs, but shard does [f]xattrops on writes, so
+ * those are fine too */
+ goto out;
+ }
+
+ res = _gf_true;
out:
- return res;
+ return res;
}
-
void
-afr_delayed_changelog_post_op (xlator_t *this, call_frame_t *frame, fd_t *fd,
- call_stub_t *stub);
-
-void
-afr_delayed_changelog_wake_up_cbk (void *data)
+afr_delayed_changelog_wake_up_cbk(void *data)
{
- fd_t *fd = NULL;
-
- fd = data;
-
- afr_delayed_changelog_wake_up (THIS, fd);
+ afr_lock_t *lock = NULL;
+ afr_local_t *local = data;
+ afr_local_t *timer_local = NULL;
+ struct list_head shared;
+
+ INIT_LIST_HEAD(&shared);
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ LOCK(&local->inode->lock);
+ {
+ timer_local = list_entry(lock->post_op.next, afr_local_t,
+ transaction.owner_list);
+ if (list_empty(&lock->owners) && (local == timer_local)) {
+ GF_ASSERT(list_empty(&lock->waiting));
+ /*Last owner*/
+ lock->release = _gf_true;
+ lock->delay_timer = NULL;
+ }
+ }
+ UNLOCK(&local->inode->lock);
+ afr_changelog_post_op_now(local->transaction.frame,
+ local->transaction.frame->this);
}
-
/* SET operation */
int
-afr_fd_report_unstable_write (xlator_t *this, fd_t *fd)
+afr_fd_report_unstable_write(xlator_t *this, afr_local_t *local)
{
- afr_fd_ctx_t *fdctx = NULL;
-
- fdctx = afr_fd_ctx_get (fd, this);
+ LOCK(&local->inode->lock);
+ {
+ local->inode_ctx->witnessed_unstable_write = _gf_true;
+ }
+ UNLOCK(&local->inode->lock);
- LOCK(&fd->lock);
- {
- fdctx->witnessed_unstable_write = _gf_true;
- }
- UNLOCK(&fd->lock);
-
- return 0;
+ return 0;
}
/* TEST and CLEAR operation */
gf_boolean_t
-afr_fd_has_witnessed_unstable_write (xlator_t *this, fd_t *fd)
+afr_fd_has_witnessed_unstable_write(xlator_t *this, inode_t *inode)
{
- afr_fd_ctx_t *fdctx = NULL;
- gf_boolean_t witness = _gf_false;
+ afr_inode_ctx_t *ctx = NULL;
+ gf_boolean_t witness = _gf_false;
- fdctx = afr_fd_ctx_get (fd, this);
- if (!fdctx)
- return _gf_true;
+ LOCK(&inode->lock);
+ {
+ (void)__afr_inode_ctx_get(this, inode, &ctx);
- LOCK(&fd->lock);
- {
- if (fdctx->witnessed_unstable_write) {
- witness = _gf_true;
- fdctx->witnessed_unstable_write = _gf_false;
- }
+ if (ctx->witnessed_unstable_write) {
+ witness = _gf_true;
+ ctx->witnessed_unstable_write = _gf_false;
}
- UNLOCK (&fd->lock);
+ }
+ UNLOCK(&inode->lock);
- return witness;
+ return witness;
}
-
int
-afr_changelog_fsync_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int op_ret, int op_errno, struct iatt *pre,
- struct iatt *post, dict_t *xdata)
+afr_changelog_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *pre,
+ struct iatt *post, dict_t *xdata)
{
- afr_private_t *priv = NULL;
- int child_index = (long) cookie;
- int call_count = -1;
- afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int child_index = (long)cookie;
+ int call_count = -1;
+ afr_local_t *local = NULL;
- priv = this->private;
- local = frame->local;
+ priv = this->private;
+ local = frame->local;
- if (op_ret != 0) {
- /* Failure of fsync() is as good as failure of previous
- write(). So treat it like one.
- */
- gf_msg (this->name, GF_LOG_WARNING,
- op_errno, AFR_MSG_FSYNC_FAILED,
- "fsync(%s) failed on subvolume %s. Transaction was %s",
- uuid_utoa (local->fd->inode->gfid),
- priv->children[child_index]->name,
- gf_fop_list[local->op]);
+ if (op_ret != 0) {
+ /* Failure of fsync() is as good as failure of previous
+ write(). So treat it like one.
+ */
+ gf_msg(this->name, GF_LOG_WARNING, op_errno, AFR_MSG_FSYNC_FAILED,
+ "fsync(%s) failed on subvolume %s. Transaction was %s",
+ uuid_utoa(local->fd->inode->gfid),
+ priv->children[child_index]->name, gf_fop_list[local->op]);
- afr_transaction_fop_failed (frame, this, child_index);
- }
+ afr_transaction_fop_failed(frame, this, child_index);
+ }
- call_count = afr_frame_return (frame);
+ call_count = afr_frame_return(frame);
- if (call_count == 0)
- afr_changelog_post_op_now (frame, this);
+ if (call_count == 0)
+ afr_changelog_post_op_now(frame, this);
- return 0;
+ return 0;
}
-
int
-afr_changelog_fsync (call_frame_t *frame, xlator_t *this)
+afr_changelog_fsync(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- int i = 0;
- int call_count = 0;
- afr_private_t *priv = NULL;
- dict_t *xdata = NULL;
- GF_UNUSED int ret = -1;
+ afr_local_t *local = NULL;
+ int i = 0;
+ int call_count = 0;
+ afr_private_t *priv = NULL;
+ dict_t *xdata = NULL;
+ GF_UNUSED int ret = -1;
- local = frame->local;
- priv = this->private;
+ local = frame->local;
+ priv = this->private;
- call_count = AFR_COUNT (local->transaction.pre_op, priv->child_count);
+ call_count = AFR_COUNT(local->transaction.pre_op, priv->child_count);
- if (!call_count) {
- /* will go straight to unlock */
- afr_changelog_post_op_now (frame, this);
- return 0;
- }
+ if (!call_count) {
+ /* will go straight to unlock */
+ afr_changelog_post_op_now(frame, this);
+ return 0;
+ }
- local->call_count = call_count;
+ local->call_count = call_count;
- xdata = dict_new();
- if (xdata)
- ret = dict_set_int32 (xdata, "batch-fsync", 1);
+ xdata = dict_new();
+ if (xdata) {
+ ret = dict_set_int32_sizen(xdata, "batch-fsync", 1);
+ ret = dict_set_str(xdata, GLUSTERFS_INTERNAL_FOP_KEY, "yes");
+ }
- for (i = 0; i < priv->child_count; i++) {
- if (!local->transaction.pre_op[i])
- continue;
+ for (i = 0; i < priv->child_count; i++) {
+ if (!local->transaction.pre_op[i])
+ continue;
- STACK_WIND_COOKIE (frame, afr_changelog_fsync_cbk,
- (void *) (long) i, priv->children[i],
- priv->children[i]->fops->fsync, local->fd,
- 1, xdata);
- if (!--call_count)
- break;
- }
+ STACK_WIND_COOKIE(frame, afr_changelog_fsync_cbk, (void *)(long)i,
+ priv->children[i], priv->children[i]->fops->fsync,
+ local->fd, 1, xdata);
+ if (!--call_count)
+ break;
+ }
- if (xdata)
- dict_unref (xdata);
+ if (xdata)
+ dict_unref(xdata);
- return 0;
+ return 0;
}
-
int
-afr_changelog_post_op_safe (call_frame_t *frame, xlator_t *this)
+afr_changelog_post_op_safe(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
-
- local = frame->local;
- priv = this->private;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
- if (!local->fd || local->transaction.type != AFR_DATA_TRANSACTION) {
- afr_changelog_post_op_now (frame, this);
- return 0;
- }
+ local = frame->local;
+ priv = this->private;
- if (afr_changelog_pre_op_uninherit (frame, this) &&
- afr_txn_nothing_failed (frame, this)) {
- /* just detected that this post-op is about to
- be optimized away as a new write() has
- already piggybacked on this frame's changelog.
- */
- afr_changelog_post_op_now (frame, this);
- return 0;
- }
+ if (!local->fd || local->transaction.type != AFR_DATA_TRANSACTION) {
+ afr_changelog_post_op_now(frame, this);
+ return 0;
+ }
- /* Calling afr_changelog_post_op_now() now will result in
- issuing ->[f]xattrop().
-
- Performing a hard POST-OP (->[f]xattrop() FOP) is a more
- responsible operation that what it might appear on the surface.
-
- The changelog of a file (in the xattr of the file on the server)
- stores information (pending count) about the state of the file
- on the OTHER server. This changelog is blindly trusted, and must
- therefore be updated in such a way it remains trustworthy. This
- implies that decrementing the pending count (essentially "clearing
- the dirty flag") must be done STRICTLY after we are sure that the
- operation on the other server has reached stable storage.
-
- While the backend filesystem on that server will eventually flush
- it to stable storage, we (being in userspace) have no mechanism
- to get notified when the write became "stable".
-
- This means we need take matter into our own hands and issue an
- fsync() EVEN IF THE APPLICATION WAS PERFORMING UNSTABLE WRITES,
- and get an acknowledgement for it. And we need to wait for the
- fsync() acknowledgement before initiating the hard POST-OP.
-
- However if the FD itself was opened in O_SYNC or O_DSYNC then
- we are already guaranteed that the writes were made stable as
- part of the FOP itself. The same holds true for NFS stable
- writes which happen on an anonymous FD with O_DSYNC or O_SYNC
- flag set in the writev() @flags param. For all other write types,
- mark a flag in the fdctx whenever an unstable write is witnessed.
+ if (afr_changelog_pre_op_uninherit(frame, this) &&
+ afr_txn_nothing_failed(frame, this)) {
+ /* just detected that this post-op is about to
+ be optimized away as a new write() has
+ already piggybacked on this frame's changelog.
*/
-
- if (!afr_fd_has_witnessed_unstable_write (this, local->fd)) {
- afr_changelog_post_op_now (frame, this);
- return 0;
- }
-
- /* Check whether users want durability and perform fsync/post-op
- * accordingly.
- */
- if (priv->ensure_durability) {
- /* Time to fsync() */
- afr_changelog_fsync (frame, this);
- } else {
- afr_changelog_post_op_now (frame, this);
- }
-
+ afr_changelog_post_op_now(frame, this);
return 0;
+ }
+
+ /* Calling afr_changelog_post_op_now() now will result in
+ issuing ->[f]xattrop().
+
+ Performing a hard POST-OP (->[f]xattrop() FOP) is a more
+ responsible operation than it might appear on the surface.
+
+ The changelog of a file (in the xattr of the file on the server)
+ stores information (pending count) about the state of the file
+ on the OTHER server. This changelog is blindly trusted, and must
+ therefore be updated in such a way it remains trustworthy. This
+ implies that decrementing the pending count (essentially "clearing
+ the dirty flag") must be done STRICTLY after we are sure that the
+ operation on the other server has reached stable storage.
+
+ While the backend filesystem on that server will eventually flush
+ it to stable storage, we (being in userspace) have no mechanism
+ to get notified when the write became "stable".
+
+ This means we need to take matters into our own hands and issue an
+ fsync() EVEN IF THE APPLICATION WAS PERFORMING UNSTABLE WRITES,
+ and get an acknowledgement for it. And we need to wait for the
+ fsync() acknowledgement before initiating the hard POST-OP.
+
+ However if the FD itself was opened in O_SYNC or O_DSYNC then
+ we are already guaranteed that the writes were made stable as
+ part of the FOP itself. The same holds true for NFS stable
+ writes which happen on an anonymous FD with O_DSYNC or O_SYNC
+ flag set in the writev() @flags param. For all other write types,
+ mark a flag in the fdctx whenever an unstable write is witnessed.
+ */
+
+ if (!afr_fd_has_witnessed_unstable_write(this, local->inode)) {
+ afr_changelog_post_op_now(frame, this);
+ return 0;
+ }
+
+ /* Check whether users want durability and perform fsync/post-op
+ * accordingly.
+ */
+ if (priv->ensure_durability) {
+ /* Time to fsync() */
+ afr_changelog_fsync(frame, this);
+ } else {
+ afr_changelog_post_op_now(frame, this);
+ }
+
+ return 0;
}
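
The long comment above reduces to a small ordering rule: the hard post-op (which clears the pending/dirty counters) must not be issued before the write is known to be on stable storage. A hedged, self-contained condensation (the enum and function names are illustrative only):

```
#include <stdbool.h>

enum post_op_path { POST_OP_NOW, FSYNC_THEN_POST_OP };

/* Decide whether an fsync must complete before the [f]xattrop post-op. */
static enum post_op_path
choose_post_op_path(bool witnessed_unstable_write, bool ensure_durability)
{
    if (!witnessed_unstable_write)
        return POST_OP_NOW; /* writes already stable, e.g. O_SYNC/O_DSYNC */
    if (ensure_durability)
        return FSYNC_THEN_POST_OP; /* wait for the fsync ack first */
    return POST_OP_NOW; /* durability not requested by configuration */
}
```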
-
void
-afr_delayed_changelog_post_op (xlator_t *this, call_frame_t *frame, fd_t *fd,
- call_stub_t *stub)
+afr_changelog_post_op(call_frame_t *frame, xlator_t *this)
{
- afr_fd_ctx_t *fd_ctx = NULL;
- call_frame_t *prev_frame = NULL;
- struct timespec delta = {0, };
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
-
- priv = this->private;
-
- fd_ctx = afr_fd_ctx_get (fd, this);
- if (!fd_ctx)
- goto out;
+ struct timespec delta = {
+ 0,
+ };
+ afr_private_t *priv = NULL;
+ afr_local_t *local = frame->local;
+ afr_lock_t *lock = NULL;
+ gf_boolean_t post_op = _gf_true;
+ struct list_head shared;
+
+ priv = this->private;
+ delta.tv_sec = priv->post_op_delay_secs;
+ delta.tv_nsec = 0;
+
+ INIT_LIST_HEAD(&shared);
+ if (!local->transaction.eager_lock_on)
+ goto out;
+
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ LOCK(&local->inode->lock);
+ {
+ list_del_init(&local->transaction.owner_list);
+ list_add(&local->transaction.owner_list, &lock->post_op);
+ __afr_transaction_wake_shared(local, &shared);
+
+ if (!afr_is_delayed_changelog_post_op_needed(frame, this,
+ delta.tv_sec)) {
+ if (list_empty(&lock->owners))
+ lock->release = _gf_true;
+ goto unlock;
+ }
- delta.tv_sec = priv->post_op_delay_secs;
- delta.tv_nsec = 0;
-
- pthread_mutex_lock (&fd_ctx->delay_lock);
- {
- prev_frame = fd_ctx->delay_frame;
- fd_ctx->delay_frame = NULL;
- if (fd_ctx->delay_timer)
- gf_timer_call_cancel (this->ctx, fd_ctx->delay_timer);
- fd_ctx->delay_timer = NULL;
- if (!frame)
- goto unlock;
- fd_ctx->delay_timer = gf_timer_call_after (this->ctx, delta,
- afr_delayed_changelog_wake_up_cbk,
- fd);
- fd_ctx->delay_frame = frame;
- }
+ GF_ASSERT(lock->delay_timer == NULL);
+ lock->delay_timer = gf_timer_call_after(
+ this->ctx, delta, afr_delayed_changelog_wake_up_cbk, local);
+ if (!lock->delay_timer) {
+ lock->release = _gf_true;
+ } else {
+ post_op = _gf_false;
+ }
+ }
unlock:
- pthread_mutex_unlock (&fd_ctx->delay_lock);
-
-out:
- if (prev_frame) {
- local = prev_frame->local;
- local->transaction.resume_stub = stub;
- afr_changelog_post_op_now (prev_frame, this);
- } else if (stub) {
- call_resume (stub);
- }
-}
-
-
-void
-afr_changelog_post_op (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
+ UNLOCK(&local->inode->lock);
- local = frame->local;
+ if (!list_empty(&shared)) {
+ afr_lock_resume_shared(&shared);
+ }
- if (is_afr_delayed_changelog_post_op_needed (frame, this))
- afr_delayed_changelog_post_op (this, frame, local->fd, NULL);
- else
- afr_changelog_post_op_safe (frame, this);
-}
-
-
-
-/* Wake up the sleeping/delayed post-op, and also register
- a stub to have it resumed after this transaction
- completely finishes.
-
- The @stub gets saved in @local and gets resumed in
- afr_local_cleanup()
- */
-void
-afr_delayed_changelog_wake_resume (xlator_t *this, fd_t *fd, call_stub_t *stub)
-{
- afr_delayed_changelog_post_op (this, NULL, fd, stub);
-}
-
-
-void
-afr_delayed_changelog_wake_up (xlator_t *this, fd_t *fd)
-{
- afr_delayed_changelog_post_op (this, NULL, fd, NULL);
+out:
+ if (post_op) {
+ if (!local->transaction.eager_lock_on || lock->release) {
+ afr_changelog_post_op_safe(frame, this);
+ } else {
+ afr_changelog_post_op_now(frame, this);
+ }
+ }
}
-
int
-afr_transaction_resume (call_frame_t *frame, xlator_t *this)
+afr_transaction_resume(call_frame_t *frame, xlator_t *this)
{
- afr_local_t *local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
-
- if (local->transaction.eager_lock_on) {
- /* We don't need to retain "local" in the
- fd list anymore, writes to all subvols
- are finished by now */
- afr_remove_eager_lock_stub (local);
- }
+ local = frame->local;
- afr_restore_lk_owner (frame);
+ afr_restore_lk_owner(frame);
- afr_handle_symmetric_errors (frame, this);
+ afr_handle_symmetric_errors(frame, this);
- if (!local->pre_op_compat)
- /* new mode, pre-op was done along
- with OP */
- afr_changelog_pre_op_update (frame, this);
+ if (!local->pre_op_compat)
+ /* new mode, pre-op was done along
+ with OP */
+ afr_changelog_pre_op_update(frame, this);
- if (__fop_changelog_needed (frame, this)) {
- afr_changelog_post_op (frame, this);
- } else {
- afr_changelog_post_op_done (frame, this);
- }
+ afr_changelog_post_op(frame, this);
- return 0;
+ return 0;
}
-
/**
* afr_transaction_fop_failed - inform that an fop failed
*/
void
-afr_transaction_fop_failed (call_frame_t *frame, xlator_t *this,
- int child_index)
+afr_transaction_fop_failed(call_frame_t *frame, xlator_t *this, int child_index)
{
- afr_local_t * local = NULL;
+ afr_local_t *local = NULL;
- local = frame->local;
+ local = frame->local;
- local->transaction.failed_subvols[child_index] = 1;
+ local->transaction.failed_subvols[child_index] = 1;
}
-
-
- static gf_boolean_t
-afr_locals_overlap (afr_local_t *local1, afr_local_t *local2)
+static gf_boolean_t
+__need_previous_lock_unlocked(afr_local_t *local)
{
- uint64_t start1 = local1->transaction.start;
- uint64_t start2 = local2->transaction.start;
- uint64_t end1 = 0;
- uint64_t end2 = 0;
-
- if (local1->transaction.len)
- end1 = start1 + local1->transaction.len - 1;
- else
- end1 = ULLONG_MAX;
-
- if (local2->transaction.len)
- end2 = start2 + local2->transaction.len - 1;
- else
- end2 = ULLONG_MAX;
+ afr_lock_t *lock = NULL;
- return ((end1 >= start2) && (end2 >= start1));
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ if (!lock->acquired)
+ return _gf_false;
+ if (lock->acquired && lock->event_generation != local->event_generation)
+ return _gf_true;
+ return _gf_false;
}
void
-afr_transaction_eager_lock_init (afr_local_t *local, xlator_t *this)
+__afr_eager_lock_handle(afr_local_t *local, gf_boolean_t *take_lock,
+ gf_boolean_t *do_pre_op, afr_local_t **timer_local)
{
- afr_private_t *priv = NULL;
- afr_fd_ctx_t *fdctx = NULL;
- afr_local_t *each = NULL;
-
- priv = this->private;
-
- if (!local->fd)
- return;
-
- if (local->transaction.type != AFR_DATA_TRANSACTION)
- return;
-
- if (!priv->eager_lock)
- return;
-
- fdctx = afr_fd_ctx_get (local->fd, this);
- if (!fdctx)
- return;
-
- if (afr_are_multiple_fds_opened (local->fd, this))
- return;
- /*
- * Once full file lock is acquired in eager-lock phase, overlapping
- * writes do not compete for inode-locks, instead are transferred to the
- * next writes. Because of this overlapping writes are not ordered.
- * This can cause inconsistencies in replication.
- * Example:
- * Two overlapping writes w1, w2 are sent in parallel on same fd
- * in two threads t1, t2.
- * Both threads can execute afr_writev_wind in the following manner.
- * t1 winds w1 on brick-0
- * t2 winds w2 on brick-0
- * t2 winds w2 on brick-1
- * t1 winds w1 on brick-1
- *
- * This check makes sure the locks are not transferred for
- * overlapping writes.
- */
- LOCK (&local->fd->lock);
- {
- list_for_each_entry (each, &fdctx->eager_locked,
- transaction.eager_locked) {
- if (afr_locals_overlap (each, local)) {
- local->transaction.eager_lock_on = _gf_false;
- goto unlock;
- }
- }
+ afr_lock_t *lock = NULL;
+ afr_local_t *owner_local = NULL;
+ xlator_t *this = local->transaction.frame->this;
+
+ local->transaction.eager_lock_on = _gf_true;
+ afr_set_lk_owner(local->transaction.frame, this, local->inode);
+
+ lock = &local->inode_ctx->lock[local->transaction.type];
+ if (__need_previous_lock_unlocked(local)) {
+ if (!list_empty(&lock->owners)) {
+ lock->release = _gf_true;
+ } else if (lock->delay_timer) {
+ lock->release = _gf_true;
+ if (gf_timer_call_cancel(this->ctx, lock->delay_timer)) {
+ /* It will be put in the frozen list
+ * in the code flow below */
+ } else {
+ *timer_local = list_entry(lock->post_op.next, afr_local_t,
+ transaction.owner_list);
+ lock->delay_timer = NULL;
+ }
+ }
+ }
+
+ if (lock->release) {
+ list_add_tail(&local->transaction.wait_list, &lock->frozen);
+ *take_lock = _gf_false;
+ goto out;
+ }
+
+ if (lock->delay_timer) {
+ *take_lock = _gf_false;
+ if (gf_timer_call_cancel(this->ctx, lock->delay_timer)) {
+ list_add_tail(&local->transaction.wait_list, &lock->frozen);
+ } else {
+ *timer_local = list_entry(lock->post_op.next, afr_local_t,
+ transaction.owner_list);
+ afr_copy_inodelk_vars(&local->internal_lock,
+ &(*timer_local)->internal_lock, this, 0);
+ lock->delay_timer = NULL;
+ *do_pre_op = _gf_true;
+ list_add_tail(&local->transaction.owner_list, &lock->owners);
+ }
+ goto out;
+ }
- local->transaction.eager_lock_on = _gf_true;
- list_add_tail (&local->transaction.eager_locked,
- &fdctx->eager_locked);
+ if (!list_empty(&lock->owners)) {
+ if (!lock->acquired || afr_has_lock_conflict(local, _gf_true)) {
+ list_add_tail(&local->transaction.wait_list, &lock->waiting);
+ *take_lock = _gf_false;
+ goto out;
}
-unlock:
- UNLOCK (&local->fd->lock);
+ owner_local = list_entry(lock->owners.next, afr_local_t,
+ transaction.owner_list);
+ afr_copy_inodelk_vars(&local->internal_lock,
+ &owner_local->internal_lock, this, 0);
+ *take_lock = _gf_false;
+ *do_pre_op = _gf_true;
+ }
+
+ if (lock->acquired)
+ GF_ASSERT(!(*take_lock));
+ list_add_tail(&local->transaction.owner_list, &lock->owners);
+out:
+ return;
}
+void
+afr_transaction_start(afr_local_t *local, xlator_t *this)
+{
+ afr_private_t *priv = NULL;
+ gf_boolean_t take_lock = _gf_true;
+ gf_boolean_t do_pre_op = _gf_false;
+ afr_local_t *timer_local = NULL;
+
+ priv = this->private;
+
+ if (local->transaction.type != AFR_DATA_TRANSACTION &&
+ local->transaction.type != AFR_METADATA_TRANSACTION)
+ goto lock_phase;
+
+ if (!priv->eager_lock)
+ goto lock_phase;
+
+ LOCK(&local->inode->lock);
+ {
+ __afr_eager_lock_handle(local, &take_lock, &do_pre_op, &timer_local);
+ }
+ UNLOCK(&local->inode->lock);
+lock_phase:
+ if (!local->transaction.eager_lock_on) {
+ afr_set_lk_owner(local->transaction.frame, this,
+ local->transaction.frame->root);
+ }
+
+ if (take_lock) {
+ afr_lock(local->transaction.frame, this);
+ } else if (do_pre_op) {
+ afr_changelog_pre_op(local->transaction.frame, this);
+ }
+ /* Always call delayed_changelog_wake_up_cbk after calling pre-op above
+ * so that any changelog inheriting can happen */
+ if (timer_local)
+ afr_delayed_changelog_wake_up_cbk(timer_local);
+}
int
-afr_transaction (call_frame_t *frame, xlator_t *this, afr_transaction_type type)
+afr_write_txn_refresh_done(call_frame_t *frame, xlator_t *this, int err)
{
- afr_local_t * local = NULL;
- afr_private_t * priv = NULL;
- fd_t *fd = NULL;
- int ret = -1;
-
- local = frame->local;
- priv = this->private;
+ afr_local_t *local = frame->local;
+
+ if (err) {
+ AFR_SET_ERROR_AND_CHECK_SPLIT_BRAIN(-1, err);
+ goto fail;
+ }
+
+ afr_transaction_start(local, this);
+ return 0;
+fail:
+ local->transaction.unwind(frame, this);
+ AFR_STACK_DESTROY(frame);
+ return 0;
+}
- local->transaction.resume = afr_transaction_resume;
- local->transaction.type = type;
+int
+afr_transaction_lockee_init(call_frame_t *frame)
+{
+ afr_local_t *local = frame->local;
+ afr_internal_lock_t *int_lock = &local->internal_lock;
+ afr_private_t *priv = frame->this->private;
+ int ret = 0;
- ret = afr_transaction_local_init (local, this);
- if (ret < 0)
- goto out;
+ switch (local->transaction.type) {
+ case AFR_DATA_TRANSACTION:
+ case AFR_METADATA_TRANSACTION:
+ ret = afr_add_inode_lockee(local, priv->child_count);
+ break;
- ret = afr_inode_get_readable (frame, local->inode, this, 0, 0, type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, EIO, AFR_MSG_SPLIT_BRAIN,
- "Failing %s on gfid %s: split-brain observed.",
- gf_fop_list[local->op], uuid_utoa (local->inode->gfid));
+ case AFR_ENTRY_TRANSACTION:
+ case AFR_ENTRY_RENAME_TRANSACTION:
+ ret = afr_add_entry_lockee(local, &local->transaction.parent_loc,
+ local->transaction.basename,
+ priv->child_count);
+ if (ret) {
goto out;
- }
- afr_transaction_eager_lock_init (local, this);
-
- if (local->fd && local->transaction.eager_lock_on)
- afr_set_lk_owner (frame, this, local->fd);
- else
- afr_set_lk_owner (frame, this, frame->root);
-
- if (!local->transaction.eager_lock_on && local->loc.inode) {
- fd = fd_lookup (local->loc.inode, frame->root->pid);
- if (fd == NULL)
- fd = fd_lookup_anonymous (local->loc.inode);
+ }
+ if (local->op == GF_FOP_RENAME) {
+ ret = afr_add_entry_lockee(
+ local, &local->transaction.new_parent_loc,
+ local->transaction.new_basename, priv->child_count);
+ if (ret) {
+ goto out;
+ }
- if (fd) {
- afr_delayed_changelog_wake_up (this, fd);
- fd_unref (fd);
+ if (local->newloc.inode &&
+ IA_ISDIR(local->newloc.inode->ia_type)) {
+ ret = afr_add_entry_lockee(local, &local->newloc, NULL,
+ priv->child_count);
+ if (ret) {
+ goto out;
+ }
}
- }
+ } else if (local->op == GF_FOP_RMDIR) {
+ ret = afr_add_entry_lockee(local, &local->loc, NULL,
+ priv->child_count);
+ if (ret) {
+ goto out;
+ }
+ }
+
+ if (int_lock->lockee_count > 1) {
+ qsort(int_lock->lockee, int_lock->lockee_count,
+ sizeof(*int_lock->lockee), afr_entry_lockee_cmp);
+ }
+ break;
+ }
+out:
+ return ret;
+}
- if (afr_lock_server_count (priv, local->transaction.type) == 0) {
- afr_internal_lock_finish (frame, this);
- } else {
- afr_lock (frame, this);
- }
+int
+afr_transaction(call_frame_t *frame, xlator_t *this, afr_transaction_type type)
+{
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ int ret = -1;
+ int event_generation = 0;
+
+ local = frame->local;
+ priv = this->private;
+ local->transaction.frame = frame;
+
+ local->transaction.type = type;
+
+ if (priv->quorum_count && !afr_has_quorum(local->child_up, this, NULL)) {
+ ret = -afr_quorum_errno(priv);
+ goto out;
+ }
+
+ if (!afr_is_consistent_io_possible(local, priv, &ret)) {
+ ret = -ret; /*op_errno to ret conversion*/
+ goto out;
+ }
+
+ if (priv->thin_arbiter_count && !afr_ta_has_quorum(priv, local)) {
+ ret = -afr_quorum_errno(priv);
+ goto out;
+ }
+
+ ret = afr_transaction_local_init(local, this);
+ if (ret < 0)
+ goto out;
+
+ ret = afr_transaction_lockee_init(frame);
+ if (ret)
+ goto out;
+
+ if (type != AFR_METADATA_TRANSACTION) {
+ goto txn_start;
+ }
+
+ ret = afr_inode_get_readable(frame, local->inode, this, local->readable,
+ &event_generation, type);
+ if (ret < 0 ||
+ afr_is_inode_refresh_reqd(local->inode, this, priv->event_generation,
+ event_generation)) {
+ afr_inode_refresh(frame, this, local->inode, local->loc.gfid,
+ afr_write_txn_refresh_done);
ret = 0;
+ goto out;
+ }
+
+txn_start:
+ ret = 0;
+ afr_transaction_start(local, this);
out:
- return ret;
+ return ret;
}
diff --git a/xlators/cluster/afr/src/afr-transaction.h b/xlators/cluster/afr/src/afr-transaction.h
index 47d43d88991..beefa26f4a6 100644
--- a/xlators/cluster/afr/src/afr-transaction.h
+++ b/xlators/cluster/afr/src/afr-transaction.h
@@ -14,43 +14,62 @@
#include "afr.h"
void
-afr_transaction_fop_failed (call_frame_t *frame, xlator_t *this,
- int child_index);
-void
-afr_txn_arbitrate_fop_cbk (call_frame_t *frame, xlator_t *this);
+afr_transaction_fop_failed(call_frame_t *frame, xlator_t *this,
+ int child_index);
+
+int32_t
+afr_transaction(call_frame_t *frame, xlator_t *this, afr_transaction_type type);
int
-afr_lock_server_count (afr_private_t *priv, afr_transaction_type type);
+afr_set_pending_dict(afr_private_t *priv, dict_t *xattr, int32_t **pending);
-afr_inodelk_t*
-afr_get_inodelk (afr_internal_lock_t *int_lock, char *dom);
+void
+afr_delayed_changelog_wake_up(xlator_t *this, fd_t *fd);
-int32_t
-afr_transaction (call_frame_t *frame, xlator_t *this, afr_transaction_type type);
+void
+__mark_all_success(call_frame_t *frame, xlator_t *this);
+
+gf_boolean_t
+afr_txn_nothing_failed(call_frame_t *frame, xlator_t *this);
int
-afr_set_pending_dict (afr_private_t *priv, dict_t *xattr, int32_t **pending);
+afr_read_txn(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ afr_read_txn_wind_t readfn, afr_transaction_type type);
-void
-afr_set_delayed_post_op (call_frame_t *frame, xlator_t *this);
+int
+afr_read_txn_continue(call_frame_t *frame, xlator_t *this, int subvol);
void
-afr_delayed_changelog_wake_up (xlator_t *this, fd_t *fd);
+afr_pending_read_increment(afr_private_t *priv, int child_index);
void
-__mark_all_success (call_frame_t *frame, xlator_t *this);
+afr_pending_read_decrement(afr_private_t *priv, int child_index);
+call_frame_t *
+afr_transaction_detach_fop_frame(call_frame_t *frame);
gf_boolean_t
-afr_txn_nothing_failed (call_frame_t *frame, xlator_t *this);
+afr_has_quorum(unsigned char *subvols, xlator_t *this, call_frame_t *frame);
+gf_boolean_t
+afr_needs_changelog_update(afr_local_t *local);
+void
+afr_zero_fill_stat(afr_local_t *local);
-int afr_read_txn (call_frame_t *frame, xlator_t *this, inode_t *inode,
- afr_read_txn_wind_t readfn, afr_transaction_type type);
+void
+afr_pick_error_xdata(afr_local_t *local, afr_private_t *priv, inode_t *inode1,
+ unsigned char *readable1, inode_t *inode2,
+ unsigned char *readable2);
+int
+afr_transaction_resume(call_frame_t *frame, xlator_t *this);
-int afr_read_txn_continue (call_frame_t *frame, xlator_t *this, int subvol);
+int
+afr_lock(call_frame_t *frame, xlator_t *this);
-int __afr_txn_write_fop (call_frame_t *frame, xlator_t *this);
-int __afr_txn_write_done (call_frame_t *frame, xlator_t *this);
-call_frame_t *afr_transaction_detach_fop_frame (call_frame_t *frame);
-gf_boolean_t afr_has_quorum (unsigned char *subvols, xlator_t *this);
+void
+afr_delayed_changelog_wake_up_cbk(void *data);
+
+int
+afr_release_notify_lock_for_ta(void *opaque);
+int
+afr_ta_lock_release_done(int ret, call_frame_t *ta_frame, void *opaque);
#endif /* __TRANSACTION_H__ */
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index 6e59fd46328..df7366f0a65 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -20,864 +20,1325 @@
struct volume_options options[];
+static char *afr_favorite_child_policies[AFR_FAV_CHILD_POLICY_MAX + 1] = {
+ [AFR_FAV_CHILD_NONE] = "none",
+ [AFR_FAV_CHILD_BY_SIZE] = "size",
+ [AFR_FAV_CHILD_BY_CTIME] = "ctime",
+ [AFR_FAV_CHILD_BY_MTIME] = "mtime",
+ [AFR_FAV_CHILD_BY_MAJORITY] = "majority",
+ [AFR_FAV_CHILD_POLICY_MAX] = NULL,
+};
+
int32_t
-notify (xlator_t *this, int32_t event,
- void *data, ...)
+notify(xlator_t *this, int32_t event, void *data, ...)
{
- int ret = -1;
- va_list ap;
- void *data2 = NULL;
+ int ret = -1;
+ va_list ap;
+ void *data2 = NULL;
- va_start (ap, data);
- data2 = va_arg (ap, dict_t*);
- va_end (ap);
- ret = afr_notify (this, event, data, data2);
+ va_start(ap, data);
+ data2 = va_arg(ap, dict_t *);
+ va_end(ap);
+ ret = afr_notify(this, event, data, data2);
- return ret;
+ return ret;
}
int32_t
-mem_acct_init (xlator_t *this)
+mem_acct_init(xlator_t *this)
{
- int ret = -1;
-
- if (!this)
- return ret;
+ int ret = -1;
- ret = xlator_mem_acct_init (this, gf_afr_mt_end + 1);
+ if (!this)
+ return ret;
- if (ret != 0) {
- return ret;
- }
+ ret = xlator_mem_acct_init(this, gf_afr_mt_end + 1);
+ if (ret != 0) {
return ret;
-}
+ }
+ return ret;
+}
int
-xlator_subvolume_index (xlator_t *this, xlator_t *subvol)
+xlator_subvolume_index(xlator_t *this, xlator_t *subvol)
{
- int index = -1;
- int i = 0;
- xlator_list_t *list = NULL;
-
- list = this->children;
-
- while (list) {
- if (subvol == list->xlator ||
- strcmp (subvol->name, list->xlator->name) == 0) {
- index = i;
- break;
- }
- list = list->next;
- i++;
+ int index = -1;
+ int i = 0;
+ xlator_list_t *list = NULL;
+
+ list = this->children;
+
+ while (list) {
+ if (subvol == list->xlator ||
+ strcmp(subvol->name, list->xlator->name) == 0) {
+ index = i;
+ break;
}
+ list = list->next;
+ i++;
+ }
- return index;
+ return index;
}
static void
-fix_quorum_options (xlator_t *this, afr_private_t *priv, char *qtype,
- dict_t *options)
+fix_quorum_options(xlator_t *this, afr_private_t *priv, char *qtype,
+ dict_t *options)
{
- if (dict_get (options, "quorum-type") == NULL) {
- /* If user doesn't configure anything enable auto-quorum if the
- * replica has odd number of subvolumes */
- if (priv->child_count % 2)
- qtype = "auto";
- }
-
- if (priv->quorum_count && strcmp (qtype, "fixed")) {
- gf_msg (this->name,GF_LOG_WARNING, 0, AFR_MSG_QUORUM_OVERRIDE,
- "quorum-type %s overriding quorum-count %u",
- qtype, priv->quorum_count);
- }
-
- if (!strcmp (qtype, "none")) {
- priv->quorum_count = 0;
- } else if (!strcmp (qtype, "auto")) {
- priv->quorum_count = AFR_QUORUM_AUTO;
- }
+ if (dict_get_sizen(options, "quorum-type") == NULL) {
+ /* If user doesn't configure anything enable auto-quorum if the
+ * replica has more than two subvolumes */
+ if (priv->child_count > 2)
+ qtype = "auto";
+ }
+
+ if (priv->quorum_count && strcmp(qtype, "fixed")) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_QUORUM_OVERRIDE,
+ "quorum-type %s overriding quorum-count %u", qtype,
+ priv->quorum_count);
+ }
+
+ if (!strcmp(qtype, "none")) {
+ priv->quorum_count = 0;
+ } else if (!strcmp(qtype, "auto")) {
+ priv->quorum_count = AFR_QUORUM_AUTO;
+ }
}
int
-reconfigure (xlator_t *this, dict_t *options)
+afr_set_favorite_child_policy(afr_private_t *priv, char *policy)
{
- afr_private_t *priv = NULL;
- xlator_t *read_subvol = NULL;
- int read_subvol_index = -1;
- int ret = -1;
- int index = -1;
- char *qtype = NULL;
-
- priv = this->private;
-
- GF_OPTION_RECONF ("afr-dirty-xattr",
- priv->afr_dirty, options, str,
- out);
-
- GF_OPTION_RECONF ("metadata-splitbrain-forced-heal",
- priv->metadata_splitbrain_forced_heal, options, bool,
- out);
+ int index = -1;
- GF_OPTION_RECONF ("background-self-heal-count",
- priv->background_self_heal_count, options, uint32,
- out);
+ index = gf_get_index_by_elem(afr_favorite_child_policies, policy);
+ if (index < 0 || index >= AFR_FAV_CHILD_POLICY_MAX)
+ return -1;
- GF_OPTION_RECONF ("heal-wait-queue-length",
- priv->heal_wait_qlen, options, uint32, out);
+ priv->fav_child_policy = index;
+ return 0;
+}
- GF_OPTION_RECONF ("metadata-self-heal",
- priv->metadata_self_heal, options, bool, out);
-
- GF_OPTION_RECONF ("data-self-heal", priv->data_self_heal, options, str,
- out);
-
- GF_OPTION_RECONF ("entry-self-heal", priv->entry_self_heal, options,
- bool, out);
+static void
+set_data_self_heal_algorithm(afr_private_t *priv, char *algo)
+{
+ if (!algo) {
+ priv->data_self_heal_algorithm = AFR_SELFHEAL_DATA_DYNAMIC;
+ } else if (strcmp(algo, "full") == 0) {
+ priv->data_self_heal_algorithm = AFR_SELFHEAL_DATA_FULL;
+ } else if (strcmp(algo, "diff") == 0) {
+ priv->data_self_heal_algorithm = AFR_SELFHEAL_DATA_DIFF;
+ } else {
+ priv->data_self_heal_algorithm = AFR_SELFHEAL_DATA_DYNAMIC;
+ }
+}
- GF_OPTION_RECONF ("data-self-heal-window-size",
- priv->data_self_heal_window_size, options,
- uint32, out);
+void
+afr_handle_anon_inode_options(afr_private_t *priv, dict_t *options)
+{
+ char *volfile_id_str = NULL;
+ uuid_t anon_inode_gfid = {0};
+
+ /*If volume id is not present don't enable anything*/
+ if (dict_get_str(options, "volume-id", &volfile_id_str))
+ return;
+ GF_ASSERT(strlen(AFR_ANON_DIR_PREFIX) + strlen(volfile_id_str) <= NAME_MAX);
+ /*anon_inode_name is not supposed to change once assigned*/
+ if (!priv->anon_inode_name[0]) {
+ snprintf(priv->anon_inode_name, sizeof(priv->anon_inode_name), "%s-%s",
+ AFR_ANON_DIR_PREFIX, volfile_id_str);
+ gf_uuid_parse(volfile_id_str, anon_inode_gfid);
+ /*Flip a bit to make sure volfile-id and anon-gfid are not same*/
+ anon_inode_gfid[0] ^= 1;
+ uuid_utoa_r(anon_inode_gfid, priv->anon_gfid_str);
+ }
+}
- GF_OPTION_RECONF ("data-change-log", priv->data_change_log, options,
- bool, out);
+int
+reconfigure(xlator_t *this, dict_t *options)
+{
+ afr_private_t *priv = NULL;
+ xlator_t *read_subvol = NULL;
+ int read_subvol_index = -1;
+ int timeout_old = 0;
+ int ret = -1;
+ int index = -1;
+ char *qtype = NULL;
+ char *fav_child_policy = NULL;
+ char *data_self_heal = NULL;
+ char *data_self_heal_algorithm = NULL;
+ char *locking_scheme = NULL;
+ gf_boolean_t consistent_io = _gf_false;
+ gf_boolean_t choose_local_old = _gf_false;
+ gf_boolean_t enabled_old = _gf_false;
- GF_OPTION_RECONF ("metadata-change-log",
- priv->metadata_change_log, options, bool, out);
+ priv = this->private;
- GF_OPTION_RECONF ("entry-change-log", priv->entry_change_log, options,
- bool, out);
+ GF_OPTION_RECONF("metadata-splitbrain-forced-heal",
+ priv->metadata_splitbrain_forced_heal, options, bool, out);
- GF_OPTION_RECONF ("data-self-heal-algorithm",
- priv->data_self_heal_algorithm, options, str, out);
+ GF_OPTION_RECONF("background-self-heal-count",
+ priv->background_self_heal_count, options, uint32, out);
- GF_OPTION_RECONF ("read-subvolume", read_subvol, options, xlator, out);
+ GF_OPTION_RECONF("heal-wait-queue-length", priv->heal_wait_qlen, options,
+ uint32, out);
- GF_OPTION_RECONF ("read-hash-mode", priv->hash_mode,
- options, uint32, out);
+ GF_OPTION_RECONF("metadata-self-heal", priv->metadata_self_heal, options,
+ bool, out);
- if (read_subvol) {
- index = xlator_subvolume_index (this, read_subvol);
- if (index == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_SUBVOL, "%s not a subvolume",
- read_subvol->name);
- goto out;
- }
- priv->read_child = index;
- }
+ GF_OPTION_RECONF("data-self-heal", data_self_heal, options, str, out);
+ if (gf_string2boolean(data_self_heal, &priv->data_self_heal) == -1)
+ goto out;
- GF_OPTION_RECONF ("read-subvolume-index",read_subvol_index, options,int32,out);
-
- if (read_subvol_index >-1) {
- index=read_subvol_index;
- if (index >= priv->child_count) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_SUBVOL,
- "%d not a subvolume-index", index);
- goto out;
- }
- priv->read_child = index;
- }
+ GF_OPTION_RECONF("entry-self-heal", priv->entry_self_heal, options, bool,
+ out);
- GF_OPTION_RECONF ("pre-op-compat", priv->pre_op_compat, options, bool, out);
+ GF_OPTION_RECONF("data-self-heal-window-size",
+ priv->data_self_heal_window_size, options, uint32, out);
- GF_OPTION_RECONF ("eager-lock", priv->eager_lock, options, bool, out);
- GF_OPTION_RECONF ("quorum-type", qtype, options, str, out);
- GF_OPTION_RECONF ("quorum-count", priv->quorum_count, options,
- uint32, out);
- fix_quorum_options (this, priv, qtype, options);
- if (priv->quorum_count && !afr_has_quorum (priv->child_up, this))
- gf_msg (this->name, GF_LOG_WARNING, 0, AFR_MSG_QUORUM_FAIL,
- "Client-quorum is not met");
+ GF_OPTION_RECONF("data-self-heal-algorithm", data_self_heal_algorithm,
+ options, str, out);
+ set_data_self_heal_algorithm(priv, data_self_heal_algorithm);
+ GF_OPTION_RECONF("halo-enabled", priv->halo_enabled, options, bool, out);
- GF_OPTION_RECONF ("post-op-delay-secs", priv->post_op_delay_secs, options,
- uint32, out);
+ GF_OPTION_RECONF("halo-shd-max-latency", priv->shd.halo_max_latency_msec,
+ options, uint32, out);
- GF_OPTION_RECONF (AFR_SH_READDIR_SIZE_KEY, priv->sh_readdir_size,
- options, size_uint64, out);
- /* Reset this so we re-discover in case the topology changed. */
- GF_OPTION_RECONF ("ensure-durability", priv->ensure_durability, options,
- bool, out);
+ GF_OPTION_RECONF("halo-nfsd-max-latency", priv->nfsd.halo_max_latency_msec,
+ options, uint32, out);
- GF_OPTION_RECONF ("self-heal-daemon", priv->shd.enabled, options,
- bool, out);
+ GF_OPTION_RECONF("halo-max-latency", priv->halo_max_latency_msec, options,
+ uint32, out);
- GF_OPTION_RECONF ("iam-self-heal-daemon", priv->shd.iamshd, options,
- bool, out);
+ GF_OPTION_RECONF("halo-max-replicas", priv->halo_max_replicas, options,
+ uint32, out);
- GF_OPTION_RECONF ("heal-timeout", priv->shd.timeout, options,
- int32, out);
+ GF_OPTION_RECONF("halo-min-replicas", priv->halo_min_replicas, options,
+ uint32, out);
- GF_OPTION_RECONF ("quorum-reads", priv->quorum_reads, options,
- bool, out);
- GF_OPTION_RECONF ("consistent-metadata", priv->consistent_metadata,
- options, bool, out);
+ GF_OPTION_RECONF("read-subvolume", read_subvol, options, xlator, out);
- GF_OPTION_RECONF ("shd-max-threads", priv->shd.max_threads,
- options, uint32, out);
+ choose_local_old = priv->choose_local;
+ GF_OPTION_RECONF("choose-local", priv->choose_local, options, bool, out);
- GF_OPTION_RECONF ("shd-wait-qlength", priv->shd.wait_qlength,
- options, uint32, out);
+ if (choose_local_old != priv->choose_local) {
+ priv->read_child = -1;
+ if (choose_local_old == _gf_false)
+ priv->did_discovery = _gf_false;
+ }
+
+ GF_OPTION_RECONF("read-hash-mode", priv->hash_mode, options, uint32, out);
+
+ if (read_subvol) {
+ index = xlator_subvolume_index(this, read_subvol);
+ if (index == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_SUBVOL,
+ "%s not a subvolume", read_subvol->name);
+ goto out;
+ }
+ priv->read_child = index;
+ }
+
+ GF_OPTION_RECONF("read-subvolume-index", read_subvol_index, options, int32,
+ out);
+
+ if (read_subvol_index > -1) {
+ index = read_subvol_index;
+ if (index >= priv->child_count) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_SUBVOL,
+ "%d not a subvolume-index", index);
+ goto out;
+ }
+ priv->read_child = index;
+ }
+
+ GF_OPTION_RECONF("pre-op-compat", priv->pre_op_compat, options, bool, out);
+ GF_OPTION_RECONF("locking-scheme", locking_scheme, options, str, out);
+ priv->granular_locks = (strcmp(locking_scheme, "granular") == 0);
+ GF_OPTION_RECONF("full-lock", priv->full_lock, options, bool, out);
+ GF_OPTION_RECONF("granular-entry-heal", priv->esh_granular, options, bool,
+ out);
+
+ GF_OPTION_RECONF("eager-lock", priv->eager_lock, options, bool, out);
+ GF_OPTION_RECONF("optimistic-change-log", priv->optimistic_change_log,
+ options, bool, out);
+ GF_OPTION_RECONF("quorum-type", qtype, options, str, out);
+ GF_OPTION_RECONF("quorum-count", priv->quorum_count, options, uint32, out);
+ fix_quorum_options(this, priv, qtype, options);
+ if (priv->quorum_count && !afr_has_quorum(priv->child_up, this, NULL))
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_QUORUM_FAIL,
+ "Client-quorum is not met");
+
+ GF_OPTION_RECONF("post-op-delay-secs", priv->post_op_delay_secs, options,
+ uint32, out);
+
+ GF_OPTION_RECONF(AFR_SH_READDIR_SIZE_KEY, priv->sh_readdir_size, options,
+ size_uint64, out);
+ /* Reset this so we re-discover in case the topology changed. */
+ GF_OPTION_RECONF("ensure-durability", priv->ensure_durability, options,
+ bool, out);
+
+ enabled_old = priv->shd.enabled;
+ GF_OPTION_RECONF("self-heal-daemon", priv->shd.enabled, options, bool, out);
+
+ GF_OPTION_RECONF("iam-self-heal-daemon", priv->shd.iamshd, options, bool,
+ out);
+
+ timeout_old = priv->shd.timeout;
+ GF_OPTION_RECONF("heal-timeout", priv->shd.timeout, options, int32, out);
+
+ GF_OPTION_RECONF("consistent-metadata", priv->consistent_metadata, options,
+ bool, out);
+
+ GF_OPTION_RECONF("shd-max-threads", priv->shd.max_threads, options, uint32,
+ out);
+
+ GF_OPTION_RECONF("shd-wait-qlength", priv->shd.wait_qlength, options,
+ uint32, out);
+
+ GF_OPTION_RECONF("favorite-child-policy", fav_child_policy, options, str,
+ out);
+ if (afr_set_favorite_child_policy(priv, fav_child_policy) == -1)
+ goto out;
+
+ priv->did_discovery = _gf_false;
+
+ GF_OPTION_RECONF("consistent-io", consistent_io, options, bool, out);
+ if (priv->quorum_count != 0)
+ consistent_io = _gf_false;
+ priv->consistent_io = consistent_io;
+
+ afr_handle_anon_inode_options(priv, options);
+
+ GF_OPTION_RECONF("use-anonymous-inode", priv->use_anon_inode, options, bool,
+ out);
+ if (priv->shd.enabled) {
+ if ((priv->shd.enabled != enabled_old) ||
+ (timeout_old != priv->shd.timeout))
+ afr_selfheal_childup(this, priv);
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
- priv->did_discovery = _gf_false;
+static int
+afr_pending_xattrs_init(afr_private_t *priv, xlator_t *this)
+{
+ int ret = -1;
+ int i = 0;
+ char *ptr = NULL;
+ char *ptr1 = NULL;
+ char *xattrs_list = NULL;
+ xlator_list_t *trav = NULL;
+ int child_count = -1;
+
+ trav = this->children;
+ child_count = priv->child_count;
+ if (priv->thin_arbiter_count) {
+ /* priv->pending_key[THIN_ARBITER_BRICK_INDEX] is used as the
+ * name of the thin arbiter file for persistence across add/
+ * removal of DHT subvols.*/
+ child_count++;
+ }
+
+ GF_OPTION_INIT("afr-pending-xattr", xattrs_list, str, out);
+ priv->pending_key = GF_CALLOC(sizeof(*priv->pending_key), child_count,
+ gf_afr_mt_char);
+ if (!priv->pending_key) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!xattrs_list) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_NO_CHANGELOG,
+ "Unable to fetch afr-pending-xattr option from volfile."
+ " Falling back to using client translator names. ");
+ while (i < child_count) {
+ ret = gf_asprintf(&priv->pending_key[i], "%s.%s", AFR_XATTR_PREFIX,
+ trav->xlator->name);
+ if (ret == -1) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ trav = trav->next;
+ i++;
+ }
ret = 0;
-out:
- return ret;
+ goto out;
+ }
+
+ ptr = ptr1 = gf_strdup(xattrs_list);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0, ptr = strtok(ptr, ","); ptr; ptr = strtok(NULL, ",")) {
+ ret = gf_asprintf(&priv->pending_key[i], "%s.%s", AFR_XATTR_PREFIX,
+ ptr);
+ if (ret == -1) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ i++;
+ }
+ ret = 0;
+out:
+ GF_FREE(ptr1);
+ return ret;
}
-
-static const char *favorite_child_warning_str = "You have specified subvolume '%s' "
- "as the 'favorite child'. This means that if a discrepancy in the content "
- "or attributes (ownership, permission, etc.) of a file is detected among "
- "the subvolumes, the file on '%s' will be considered the definitive "
- "version and its contents will OVERWRITE the contents of the file on other "
- "subvolumes. All versions of the file except that on '%s' "
- "WILL BE LOST.";
-
+void
+afr_ta_init(afr_private_t *priv)
+{
+ priv->thin_arbiter_count = 1;
+ priv->child_count--;
+ priv->ta_child_up = 0;
+ priv->ta_bad_child_index = AFR_CHILD_UNKNOWN;
+ priv->ta_notify_dom_lock_offset = 0;
+ priv->ta_in_mem_txn_count = 0;
+ priv->ta_on_wire_txn_count = 0;
+ priv->release_ta_notify_dom_lock = _gf_false;
+ INIT_LIST_HEAD(&priv->ta_waitq);
+ INIT_LIST_HEAD(&priv->ta_onwireq);
+ gf_uuid_clear(priv->ta_gfid);
+}
int32_t
-init (xlator_t *this)
+init(xlator_t *this)
{
- afr_private_t *priv = NULL;
- int child_count = 0;
- xlator_list_t *trav = NULL;
- int i = 0;
- int ret = -1;
- GF_UNUSED int op_errno = 0;
- xlator_t *read_subvol = NULL;
- int read_subvol_index = -1;
- xlator_t *fav_child = NULL;
- char *qtype = NULL;
- char *xattrs_list = NULL;
- char *ptr = NULL;
-
- if (!this->children) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_CHILD_MISCONFIGURED,
- "replicate translator needs more than one "
- "subvolume defined.");
- return -1;
+ afr_private_t *priv = NULL;
+ int child_count = 0;
+ xlator_list_t *trav = NULL;
+ int i = 0;
+ int ret = -1;
+ GF_UNUSED int op_errno = 0;
+ xlator_t *read_subvol = NULL;
+ int read_subvol_index = -1;
+ char *qtype = NULL;
+ char *fav_child_policy = NULL;
+ char *thin_arbiter = NULL;
+ char *data_self_heal = NULL;
+ char *locking_scheme = NULL;
+ char *data_self_heal_algorithm = NULL;
+
+ if (!this->children) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_CHILD_MISCONFIGURED,
+ "replicate translator needs more than one "
+ "subvolume defined.");
+ return -1;
+ }
+
+ if (!this->parents) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_VOL_MISCONFIGURED,
+ "Volume is dangling.");
+ }
+
+ this->private = GF_CALLOC(1, sizeof(afr_private_t),
+ gf_afr_mt_afr_private_t);
+ if (!this->private)
+ goto out;
+
+ priv = this->private;
+ INIT_LIST_HEAD(&priv->saved_locks);
+ INIT_LIST_HEAD(&priv->lk_healq);
+ LOCK_INIT(&priv->lock);
+
+ child_count = xlator_subvolume_count(this);
+
+ priv->child_count = child_count;
+
+ priv->read_child = -1;
+
+ GF_OPTION_INIT("arbiter-count", priv->arbiter_count, uint32, out);
+ GF_OPTION_INIT("thin-arbiter", thin_arbiter, str, out);
+ if (thin_arbiter && strlen(thin_arbiter) > 0) {
+ afr_ta_init(priv);
+ }
+ INIT_LIST_HEAD(&priv->healing);
+ INIT_LIST_HEAD(&priv->heal_waiting);
+
+ priv->spb_choice_timeout = AFR_DEFAULT_SPB_CHOICE_TIMEOUT;
+
+ GF_OPTION_INIT("afr-dirty-xattr", priv->afr_dirty, str, out);
+
+ GF_OPTION_INIT("metadata-splitbrain-forced-heal",
+ priv->metadata_splitbrain_forced_heal, bool, out);
+
+ GF_OPTION_INIT("read-subvolume", read_subvol, xlator, out);
+ if (read_subvol) {
+ priv->read_child = xlator_subvolume_index(this, read_subvol);
+ if (priv->read_child == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_SUBVOL,
+ "%s not a subvolume", read_subvol->name);
+ goto out;
}
-
- if (!this->parents) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_VOL_MISCONFIGURED, "Volume is dangling.");
+ }
+ GF_OPTION_INIT("read-subvolume-index", read_subvol_index, int32, out);
+ if (read_subvol_index > -1) {
+ if (read_subvol_index >= priv->child_count) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_SUBVOL,
+ "%d not a subvolume-index", read_subvol_index);
+ goto out;
}
+ priv->read_child = read_subvol_index;
+ }
+ GF_OPTION_INIT("choose-local", priv->choose_local, bool, out);
- this->private = GF_CALLOC (1, sizeof (afr_private_t),
- gf_afr_mt_afr_private_t);
- if (!this->private)
- goto out;
-
- priv = this->private;
- LOCK_INIT (&priv->lock);
-
- child_count = xlator_subvolume_count (this);
-
- priv->child_count = child_count;
-
- priv->read_child = -1;
+ priv->pending_reads = GF_CALLOC(sizeof(*priv->pending_reads),
+ priv->child_count, gf_afr_mt_atomic_t);
- GF_OPTION_INIT ("arbiter-count", priv->arbiter_count, uint32, out);
- INIT_LIST_HEAD (&priv->healing);
- INIT_LIST_HEAD (&priv->heal_waiting);
+ GF_OPTION_INIT("read-hash-mode", priv->hash_mode, uint32, out);
- priv->spb_choice_timeout = AFR_DEFAULT_SPB_CHOICE_TIMEOUT;
+ priv->favorite_child = -1;
- GF_OPTION_INIT ("afr-dirty-xattr", priv->afr_dirty, str, out);
+ GF_OPTION_INIT("favorite-child-policy", fav_child_policy, str, out);
+ if (afr_set_favorite_child_policy(priv, fav_child_policy) == -1)
+ goto out;
- GF_OPTION_INIT ("metadata-splitbrain-forced-heal",
- priv->metadata_splitbrain_forced_heal, bool, out);
+ GF_OPTION_INIT("shd-max-threads", priv->shd.max_threads, uint32, out);
- GF_OPTION_INIT ("read-subvolume", read_subvol, xlator, out);
- if (read_subvol) {
- priv->read_child = xlator_subvolume_index (this, read_subvol);
- if (priv->read_child == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_SUBVOL, "%s not a subvolume",
- read_subvol->name);
- goto out;
- }
- }
- GF_OPTION_INIT ("read-subvolume-index",read_subvol_index,int32,out);
- if (read_subvol_index > -1) {
- if (read_subvol_index >= priv->child_count) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_SUBVOL,
- "%d not a subvolume-index", read_subvol_index);
- goto out;
- }
- priv->read_child = read_subvol_index;
- }
- GF_OPTION_INIT ("choose-local", priv->choose_local, bool, out);
-
- GF_OPTION_INIT ("read-hash-mode", priv->hash_mode, uint32, out);
-
- priv->favorite_child = -1;
- GF_OPTION_INIT ("favorite-child", fav_child, xlator, out);
- if (fav_child) {
- priv->favorite_child = xlator_subvolume_index (this, fav_child);
- if (priv->favorite_child == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INVALID_SUBVOL, "%s not a subvolume, "
- "cannot set it as favorite child",
- fav_child->name);
- goto out;
- }
- gf_msg (this->name, GF_LOG_WARNING, 0, AFR_MSG_FAVORITE_CHILD,
- favorite_child_warning_str, fav_child->name,
- fav_child->name, fav_child->name);
- }
+ GF_OPTION_INIT("shd-wait-qlength", priv->shd.wait_qlength, uint32, out);
- GF_OPTION_INIT ("shd-max-threads", priv->shd.max_threads,
- uint32, out);
+ GF_OPTION_INIT("background-self-heal-count",
+ priv->background_self_heal_count, uint32, out);
- GF_OPTION_INIT ("shd-wait-qlength", priv->shd.wait_qlength,
- uint32, out);
+ GF_OPTION_INIT("heal-wait-queue-length", priv->heal_wait_qlen, uint32, out);
- GF_OPTION_INIT ("background-self-heal-count",
- priv->background_self_heal_count, uint32, out);
+ GF_OPTION_INIT("data-self-heal", data_self_heal, str, out);
+ if (gf_string2boolean(data_self_heal, &priv->data_self_heal) == -1)
+ goto out;
- GF_OPTION_INIT ("heal-wait-queue-length",
- priv->heal_wait_qlen, uint32, out);
+ GF_OPTION_INIT("data-self-heal-algorithm", data_self_heal_algorithm, str,
+ out);
+ set_data_self_heal_algorithm(priv, data_self_heal_algorithm);
- GF_OPTION_INIT ("data-self-heal", priv->data_self_heal, str, out);
+ GF_OPTION_INIT("data-self-heal-window-size",
+ priv->data_self_heal_window_size, uint32, out);
- GF_OPTION_INIT ("data-self-heal-algorithm",
- priv->data_self_heal_algorithm, str, out);
+ GF_OPTION_INIT("metadata-self-heal", priv->metadata_self_heal, bool, out);
- GF_OPTION_INIT ("data-self-heal-window-size",
- priv->data_self_heal_window_size, uint32, out);
+ GF_OPTION_INIT("entry-self-heal", priv->entry_self_heal, bool, out);
- GF_OPTION_INIT ("metadata-self-heal", priv->metadata_self_heal, bool,
- out);
+ GF_OPTION_INIT("halo-shd-max-latency", priv->shd.halo_max_latency_msec,
+ uint32, out);
- GF_OPTION_INIT ("entry-self-heal", priv->entry_self_heal, bool, out);
+ GF_OPTION_INIT("halo-max-latency", priv->halo_max_latency_msec, uint32,
+ out);
+ GF_OPTION_INIT("halo-max-replicas", priv->halo_max_replicas, uint32, out);
+ GF_OPTION_INIT("halo-min-replicas", priv->halo_min_replicas, uint32, out);
- GF_OPTION_INIT ("data-change-log", priv->data_change_log, bool, out);
+ GF_OPTION_INIT("halo-enabled", priv->halo_enabled, bool, out);
- GF_OPTION_INIT ("metadata-change-log", priv->metadata_change_log, bool,
- out);
+ GF_OPTION_INIT("halo-nfsd-max-latency", priv->nfsd.halo_max_latency_msec,
+ uint32, out);
- GF_OPTION_INIT ("entry-change-log", priv->entry_change_log, bool, out);
+ GF_OPTION_INIT("iam-nfs-daemon", priv->nfsd.iamnfsd, bool, out);
- GF_OPTION_INIT ("optimistic-change-log", priv->optimistic_change_log,
- bool, out);
+ GF_OPTION_INIT("optimistic-change-log", priv->optimistic_change_log, bool,
+ out);
- GF_OPTION_INIT ("inodelk-trace", priv->inodelk_trace, bool, out);
+ GF_OPTION_INIT("pre-op-compat", priv->pre_op_compat, bool, out);
+ GF_OPTION_INIT("locking-scheme", locking_scheme, str, out);
+ priv->granular_locks = (strcmp(locking_scheme, "granular") == 0);
+ GF_OPTION_INIT("full-lock", priv->full_lock, bool, out);
+ GF_OPTION_INIT("granular-entry-heal", priv->esh_granular, bool, out);
- GF_OPTION_INIT ("entrylk-trace", priv->entrylk_trace, bool, out);
+ GF_OPTION_INIT("eager-lock", priv->eager_lock, bool, out);
+ GF_OPTION_INIT("quorum-type", qtype, str, out);
+ GF_OPTION_INIT("quorum-count", priv->quorum_count, uint32, out);
+ GF_OPTION_INIT(AFR_SH_READDIR_SIZE_KEY, priv->sh_readdir_size, size_uint64,
+ out);
+ fix_quorum_options(this, priv, qtype, this->options);
- GF_OPTION_INIT ("pre-op-compat", priv->pre_op_compat, bool, out);
+ GF_OPTION_INIT("post-op-delay-secs", priv->post_op_delay_secs, uint32, out);
+ GF_OPTION_INIT("ensure-durability", priv->ensure_durability, bool, out);
- GF_OPTION_INIT ("eager-lock", priv->eager_lock, bool, out);
- GF_OPTION_INIT ("quorum-type", qtype, str, out);
- GF_OPTION_INIT ("quorum-count", priv->quorum_count, uint32, out);
- GF_OPTION_INIT (AFR_SH_READDIR_SIZE_KEY, priv->sh_readdir_size, size_uint64,
- out);
- fix_quorum_options (this, priv, qtype, this->options);
+ GF_OPTION_INIT("self-heal-daemon", priv->shd.enabled, bool, out);
- GF_OPTION_INIT ("post-op-delay-secs", priv->post_op_delay_secs, uint32, out);
- GF_OPTION_INIT ("ensure-durability", priv->ensure_durability, bool,
- out);
+ GF_OPTION_INIT("iam-self-heal-daemon", priv->shd.iamshd, bool, out);
+ GF_OPTION_INIT("heal-timeout", priv->shd.timeout, int32, out);
- GF_OPTION_INIT ("self-heal-daemon", priv->shd.enabled, bool, out);
+ GF_OPTION_INIT("consistent-metadata", priv->consistent_metadata, bool, out);
+ GF_OPTION_INIT("consistent-io", priv->consistent_io, bool, out);
+ afr_handle_anon_inode_options(priv, this->options);
- GF_OPTION_INIT ("iam-self-heal-daemon", priv->shd.iamshd, bool, out);
- GF_OPTION_INIT ("heal-timeout", priv->shd.timeout, int32, out);
+ GF_OPTION_INIT("use-anonymous-inode", priv->use_anon_inode, bool, out);
+ if (priv->quorum_count != 0)
+ priv->consistent_io = _gf_false;
- GF_OPTION_INIT ("quorum-reads", priv->quorum_reads, bool, out);
- GF_OPTION_INIT ("consistent-metadata", priv->consistent_metadata, bool,
- out);
+ priv->wait_count = 1;
- priv->wait_count = 1;
+ priv->local = GF_CALLOC(sizeof(unsigned char), child_count, gf_afr_mt_char);
+ if (!priv->local) {
+ ret = -ENOMEM;
+ goto out;
+ }
- priv->local = GF_CALLOC (sizeof (unsigned char), child_count,
+ priv->anon_inode = GF_CALLOC(sizeof(unsigned char), child_count,
gf_afr_mt_char);
- if (!priv->local) {
- ret = -ENOMEM;
- goto out;
- }
- priv->child_up = GF_CALLOC (sizeof (unsigned char), child_count,
- gf_afr_mt_char);
- if (!priv->child_up) {
- ret = -ENOMEM;
- goto out;
- }
+ priv->child_up = GF_CALLOC(sizeof(unsigned char), child_count,
+ gf_afr_mt_char);
- for (i = 0; i < child_count; i++)
- priv->child_up[i] = -1; /* start with unknown state.
- this initialization needed
- for afr_notify() to work
- reliably
- */
-
- priv->children = GF_CALLOC (sizeof (xlator_t *), child_count,
- gf_afr_mt_xlator_t);
- if (!priv->children) {
- ret = -ENOMEM;
- goto out;
- }
-
- GF_OPTION_INIT ("afr-pending-xattr", xattrs_list, str, out);
- priv->pending_key = GF_CALLOC (sizeof (*priv->pending_key),
- child_count,
- gf_afr_mt_char);
- if (!priv->pending_key) {
- ret = -ENOMEM;
- goto out;
- }
- if (!xattrs_list) {
- ret = -EINVAL;
- gf_msg (this->name, GF_LOG_ERROR, -ret, AFR_MSG_NO_CHANGELOG,
- "Unable to fetch afr pending changelogs. Is op-version"
- " >= 30707?");
- goto out;
- }
- ptr = gf_strdup (xattrs_list);
- if (!ptr) {
- ret = -ENOMEM;
- goto out;
- }
- for (i = 0, ptr = strtok (ptr, ","); ptr; ptr = strtok (NULL, ",")) {
- ret = gf_asprintf (&priv->pending_key[i], "%s.%s",
- AFR_XATTR_PREFIX, ptr);
- if (ret == -1) {
- ret = -ENOMEM;
- goto out;
- }
- i++;
- }
+ priv->child_latency = GF_MALLOC(sizeof(*priv->child_latency) * child_count,
+ gf_afr_mt_child_latency_t);
+ priv->halo_child_up = GF_CALLOC(sizeof(unsigned char), child_count,
+ gf_afr_mt_char);
- trav = this->children;
- i = 0;
- while (i < child_count) {
- priv->children[i] = trav->xlator;
- trav = trav->next;
- i++;
+ if (!priv->child_up || !priv->child_latency || !priv->halo_child_up ||
+ !priv->anon_inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /*Initialize to -ve ping timeout so that they are not considered
+ * in child-up events until ping-event comes*/
+ for (i = 0; i < child_count; i++)
+ priv->child_latency[i] = -1;
+
+ priv->children = GF_CALLOC(sizeof(xlator_t *), child_count,
+ gf_afr_mt_xlator_t);
+ if (!priv->children) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = afr_pending_xattrs_init(priv, this);
+ if (ret)
+ goto out;
+
+ trav = this->children;
+ i = 0;
+ while (i < child_count) {
+ priv->children[i] = trav->xlator;
+ trav = trav->next;
+ i++;
+ }
+
+ ret = gf_asprintf(&priv->sh_domain, AFR_SH_DATA_DOMAIN_FMT, this->name);
+ if (-1 == ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ priv->last_event = GF_CALLOC(child_count, sizeof(*priv->last_event),
+ gf_afr_mt_int32_t);
+ if (!priv->last_event) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this);
+ if (!this->itable) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (priv->shd.iamshd) {
+ ret = afr_selfheal_daemon_init(this);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
}
+ }
- ret = gf_asprintf (&priv->sh_domain, AFR_SH_DATA_DOMAIN_FMT,
- this->name);
- if (-1 == ret) {
- ret = -ENOMEM;
- goto out;
- }
+ /* keep more local here as we may need them for self-heal etc */
+ this->local_pool = mem_pool_new(afr_local_t, 512);
+ if (!this->local_pool) {
+ ret = -1;
+ goto out;
+ }
- priv->last_event = GF_CALLOC (child_count, sizeof (*priv->last_event),
- gf_afr_mt_int32_t);
- if (!priv->last_event) {
- ret = -ENOMEM;
- goto out;
- }
+ priv->root_inode = NULL;
- ret = afr_selfheal_daemon_init (this);
- if (ret) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = 0;
+out:
+ return ret;
+}
+void
+afr_destroy_healer_object(xlator_t *this, struct subvol_healer *healer)
+{
+ int ret = -1;
+
+ if (!healer)
+ return;
+
+ if (healer->running) {
+ /*
+ * If there are any resources to cleanup, We need
+ * to do that gracefully using pthread_cleanup_push
+ */
+ ret = gf_thread_cleanup_xint(healer->thread);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SELF_HEAL_FAILED,
+ "Failed to clean up healer threads.");
+ healer->thread = 0;
+ }
+ pthread_cond_destroy(&healer->cond);
+ pthread_mutex_destroy(&healer->mutex);
+}
- /* keep more local here as we may need them for self-heal etc */
- this->local_pool = mem_pool_new (afr_local_t, 512);
- if (!this->local_pool) {
- ret = -1;
- goto out;
- }
+void
+afr_selfheal_daemon_fini(xlator_t *this)
+{
+ struct subvol_healer *healer = NULL;
+ afr_self_heald_t *shd = NULL;
+ afr_private_t *priv = NULL;
+ int i = 0;
+
+ priv = this->private;
+ if (!priv)
+ return;
+
+ shd = &priv->shd;
+ if (!shd->iamshd)
+ return;
+
+ for (i = 0; i < priv->child_count; i++) {
+ healer = &shd->index_healers[i];
+ afr_destroy_healer_object(this, healer);
+
+ healer = &shd->full_healers[i];
+ afr_destroy_healer_object(this, healer);
+
+ if (shd->statistics[i])
+ eh_destroy(shd->statistics[i]);
+ }
+ GF_FREE(shd->index_healers);
+ GF_FREE(shd->full_healers);
+ GF_FREE(shd->statistics);
+ if (shd->split_brain)
+ eh_destroy(shd->split_brain);
+}
+void
+fini(xlator_t *this)
+{
+ afr_private_t *priv = NULL;
- priv->root_inode = NULL;
+ priv = this->private;
- ret = 0;
-out:
- GF_FREE (ptr);
- return ret;
-}
+ afr_selfheal_daemon_fini(this);
+ GF_ASSERT(list_empty(&priv->saved_locks));
+ LOCK(&priv->lock);
+ if (priv->timer != NULL) {
+ gf_timer_call_cancel(this->ctx, priv->timer);
+ priv->timer = NULL;
+ }
+ UNLOCK(&priv->lock);
-int
-fini (xlator_t *this)
-{
- afr_private_t *priv = NULL;
+ if (this->local_pool != NULL) {
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+ }
- priv = this->private;
- this->private = NULL;
- afr_priv_destroy (priv);
- //if (this->itable);//I dont see any destroy func
+ this->private = NULL;
+ afr_priv_destroy(priv);
+ if (this->itable) {
+ inode_table_destroy(this->itable);
+ this->itable = NULL;
+ }
- return 0;
+ return;
}
-
struct xlator_fops fops = {
- .lookup = afr_lookup,
- .open = afr_open,
- .lk = afr_lk,
- .flush = afr_flush,
- .statfs = afr_statfs,
- .fsync = afr_fsync,
- .fsyncdir = afr_fsyncdir,
- .xattrop = afr_xattrop,
- .fxattrop = afr_fxattrop,
- .inodelk = afr_inodelk,
- .finodelk = afr_finodelk,
- .entrylk = afr_entrylk,
- .fentrylk = afr_fentrylk,
-
- /* inode read */
- .access = afr_access,
- .stat = afr_stat,
- .fstat = afr_fstat,
- .readlink = afr_readlink,
- .getxattr = afr_getxattr,
- .fgetxattr = afr_fgetxattr,
- .readv = afr_readv,
-
- /* inode write */
- .writev = afr_writev,
- .truncate = afr_truncate,
- .ftruncate = afr_ftruncate,
- .setxattr = afr_setxattr,
- .fsetxattr = afr_fsetxattr,
- .setattr = afr_setattr,
- .fsetattr = afr_fsetattr,
- .removexattr = afr_removexattr,
- .fremovexattr = afr_fremovexattr,
- .fallocate = afr_fallocate,
- .discard = afr_discard,
- .zerofill = afr_zerofill,
-
- /* dir read */
- .opendir = afr_opendir,
- .readdir = afr_readdir,
- .readdirp = afr_readdirp,
-
- /* dir write */
- .create = afr_create,
- .mknod = afr_mknod,
- .mkdir = afr_mkdir,
- .unlink = afr_unlink,
- .rmdir = afr_rmdir,
- .link = afr_link,
- .symlink = afr_symlink,
- .rename = afr_rename,
+ .lookup = afr_lookup,
+ .lk = afr_lk,
+ .flush = afr_flush,
+ .statfs = afr_statfs,
+ .fsyncdir = afr_fsyncdir,
+ .inodelk = afr_inodelk,
+ .finodelk = afr_finodelk,
+ .entrylk = afr_entrylk,
+ .fentrylk = afr_fentrylk,
+ .ipc = afr_ipc,
+ .lease = afr_lease,
+
+ /* inode read */
+ .access = afr_access,
+ .stat = afr_stat,
+ .fstat = afr_fstat,
+ .readlink = afr_readlink,
+ .getxattr = afr_getxattr,
+ .fgetxattr = afr_fgetxattr,
+ .readv = afr_readv,
+ .seek = afr_seek,
+
+ /* inode write */
+ .writev = afr_writev,
+ .truncate = afr_truncate,
+ .ftruncate = afr_ftruncate,
+ .setxattr = afr_setxattr,
+ .fsetxattr = afr_fsetxattr,
+ .setattr = afr_setattr,
+ .fsetattr = afr_fsetattr,
+ .removexattr = afr_removexattr,
+ .fremovexattr = afr_fremovexattr,
+ .fallocate = afr_fallocate,
+ .discard = afr_discard,
+ .zerofill = afr_zerofill,
+ .xattrop = afr_xattrop,
+ .fxattrop = afr_fxattrop,
+ .fsync = afr_fsync,
+
+ /*inode open*/
+ .opendir = afr_opendir,
+ .open = afr_open,
+
+ /* dir read */
+ .readdir = afr_readdir,
+ .readdirp = afr_readdirp,
+
+ /* dir write */
+ .create = afr_create,
+ .mknod = afr_mknod,
+ .mkdir = afr_mkdir,
+ .unlink = afr_unlink,
+ .rmdir = afr_rmdir,
+ .link = afr_link,
+ .symlink = afr_symlink,
+ .rename = afr_rename,
};
-
struct xlator_dumpops dumpops = {
- .priv = afr_priv_dump,
+ .priv = afr_priv_dump,
};
-
struct xlator_cbks cbks = {
- .release = afr_release,
- .releasedir = afr_releasedir,
- .forget = afr_forget,
+ .release = afr_release,
+ .releasedir = afr_releasedir,
+ .forget = afr_forget,
};
-
struct volume_options options[] = {
- { .key = {"read-subvolume" },
- .type = GF_OPTION_TYPE_XLATOR,
- .description = "inode-read fops happen only on one of the bricks in "
- "replicate. Afr will prefer the one specified using "
- "this option if it is not stale. Option value must be "
- "one of the xlator names of the children. "
- "Ex: <volname>-client-0 till "
- "<volname>-client-<number-of-bricks - 1>"
- },
- { .key = {"read-subvolume-index" },
- .type = GF_OPTION_TYPE_INT,
- .default_value = "-1",
- .description = "inode-read fops happen only on one of the bricks in "
- "replicate. AFR will prefer the one specified using "
- "this option if it is not stale. allowed options"
- " include -1 till replica-count - 1"
- },
- { .key = {"read-hash-mode" },
- .type = GF_OPTION_TYPE_INT,
- .min = 0,
- .max = 2,
- .default_value = "1",
- .description = "inode-read fops happen only on one of the bricks in "
- "replicate. AFR will prefer the one computed using "
- "the method specified using this option"
- "0 = first up server, "
- "1 = hash by GFID of file (all clients use "
- "same subvolume), "
- "2 = hash by GFID of file and client PID",
- },
- { .key = {"choose-local" },
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "true",
- .description = "Choose a local subvolume (i.e. Brick) to read from"
- " if read-subvolume is not explicitly set.",
- },
- { .key = {"favorite-child"},
- .type = GF_OPTION_TYPE_XLATOR,
- .description = "If a split-brain happens choose subvol/brick set by "
- "this option as source."
- },
- { .key = {"background-self-heal-count"},
- .type = GF_OPTION_TYPE_INT,
- .min = 0,
- .max = 256,
- .default_value = "8",
- .validate = GF_OPT_VALIDATE_MIN,
- .description = "This specifies the number of per client self-heal "
- "jobs that can perform parallel heals in the "
- "background."
- },
- { .key = {"heal-wait-queue-length"},
- .type = GF_OPTION_TYPE_INT,
- .min = 0,
- .max = 10000, /*Around 100MB with sizeof(afr_local_t)= 10496 bytes*/
- .default_value = "128",
- .validate = GF_OPT_VALIDATE_MIN,
- .description = "This specifies the number of heals that can be queued"
- " for the parallel background self heal jobs."
- },
- { .key = {"data-self-heal"},
- .type = GF_OPTION_TYPE_STR,
- .value = {"1", "on", "yes", "true", "enable",
- "0", "off", "no", "false", "disable",
- "open"},
- .default_value = "on",
- .description = "Using this option we can enable/disable data "
- "self-heal on the file. \"open\" means data "
- "self-heal action will only be triggered by file "
- "open operations."
- },
- { .key = {"data-self-heal-algorithm"},
- .type = GF_OPTION_TYPE_STR,
- .description = "Select between \"full\", \"diff\". The "
- "\"full\" algorithm copies the entire file from "
- "source to sink. The \"diff\" algorithm copies to "
- "sink only those blocks whose checksums don't match "
- "with those of source. If no option is configured "
- "the option is chosen dynamically as follows: "
- "If the file does not exist on one of the sinks "
- "or empty file exists or if the source file size is "
- "about the same as page size the entire file will "
- "be read and written i.e \"full\" algo, "
- "otherwise \"diff\" algo is chosen.",
- .value = { "diff", "full"}
- },
- { .key = {"data-self-heal-window-size"},
- .type = GF_OPTION_TYPE_INT,
- .min = 1,
- .max = 1024,
- .default_value = "1",
- .description = "Maximum number blocks per file for which self-heal "
- "process would be applied simultaneously."
- },
- { .key = {"metadata-self-heal"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Using this option we can enable/disable metadata "
- "i.e. Permissions, ownerships, xattrs self-heal on "
- "the file/directory."
- },
- { .key = {"entry-self-heal"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Using this option we can enable/disable entry "
- "self-heal on the directory."
- },
- { .key = {"data-change-log"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Data fops like write/truncate will not perform "
- "pre/post fop changelog operations in afr transaction "
- "if this option is disabled"
- },
- { .key = {"metadata-change-log"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Metadata fops like setattr/setxattr will not perform "
- "pre/post fop changelog operations in afr transaction "
- "if this option is disabled"
- },
- { .key = {"entry-change-log"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Entry fops like create/unlink will not perform "
- "pre/post fop changelog operations in afr transaction "
- "if this option is disabled"
- },
- { .key = {"optimistic-change-log"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Entry/Metadata fops will not perform "
- "pre fop changelog operations in afr transaction "
- "if this option is enabled."
- },
- { .key = {"inodelk-trace"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "off",
- .description = "Enabling this option logs inode lock/unlocks"
- },
- { .key = {"entrylk-trace"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "off",
- .description = "Enabling this option logs entry lock/unlocks"
- },
- { .key = {"pre-op-compat"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Use separate pre-op xattrop() FOP rather than "
- "overloading xdata of the OP"
- },
- { .key = {"eager-lock"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "Lock phase of a transaction has two sub-phases. "
- "First is an attempt to acquire locks in parallel by "
- "broadcasting non-blocking lock requests. If lock "
- "acquisition fails on any server, then the held locks "
- "are unlocked and revert to a blocking locked mode "
- "sequentially on one server after another. If this "
- "option is enabled the initial broadcasting lock "
- "request attempt to acquire lock on the entire file. "
- "If this fails, we revert back to the sequential "
- "\"regional\" blocking lock as before. In the case "
- "where such an \"eager\" lock is granted in the "
- "non-blocking phase, it gives rise to an opportunity "
- "for optimization. i.e, if the next write transaction "
- "on the same FD arrives before the unlock phase of "
- "the first transaction, it \"takes over\" the full "
- "file lock. Similarly if yet another data transaction "
- "arrives before the unlock phase of the \"optimized\" "
- "transaction, that in turn \"takes over\" the lock as "
- "well. The actual unlock now happens at the end of "
- "the last \"optimized\" transaction."
-
- },
- { .key = {"self-heal-daemon"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "on",
- .description = "This option applies to only self-heal-daemon. "
- "Index directory crawl and automatic healing of files "
- "will not be performed if this option is turned off."
- },
- { .key = {"iam-self-heal-daemon"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "off",
- .description = "This option differentiates if the replicate "
- "translator is running as part of self-heal-daemon "
- "or not."
- },
- { .key = {"quorum-type"},
- .type = GF_OPTION_TYPE_STR,
- .value = { "none", "auto", "fixed"},
- .default_value = "none",
- .description = "If value is \"fixed\" only allow writes if "
- "quorum-count bricks are present. If value is "
- "\"auto\" only allow writes if more than half of "
- "bricks, or exactly half including the first, are "
- "present.",
- },
- { .key = {"quorum-count"},
- .type = GF_OPTION_TYPE_INT,
- .min = 1,
- .max = INT_MAX,
- .default_value = 0,
- .description = "If quorum-type is \"fixed\" only allow writes if "
- "this many bricks or present. Other quorum types "
- "will OVERWRITE this value.",
- },
- { .key = {"quorum-reads"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "no",
- .description = "If quorum-reads is \"true\" only allow reads if "
- "quorum is met when quorum is enabled.",
- },
- { .key = {"node-uuid"},
- .type = GF_OPTION_TYPE_STR,
- .description = "Local glusterd uuid string, used in starting "
- "self-heal-daemon so that it can crawl only on "
- "local index directories.",
- },
- { .key = {"post-op-delay-secs"},
- .type = GF_OPTION_TYPE_INT,
- .min = 0,
- .max = INT_MAX,
- .default_value = "1",
- .description = "Time interval induced artificially before "
- "post-operation phase of the transaction to "
- "enhance overlap of adjacent write operations.",
- },
- { .key = {AFR_SH_READDIR_SIZE_KEY},
- .type = GF_OPTION_TYPE_SIZET,
- .description = "readdirp size for performing entry self-heal",
- .min = 1024,
- .max = 131072,
- .default_value = "1KB",
- },
- { .key = {"ensure-durability"},
- .type = GF_OPTION_TYPE_BOOL,
- .description = "Afr performs fsyncs for transactions if this "
- "option is on to make sure the changelogs/data is "
- "written to the disk",
- .default_value = "on",
- },
- { .key = {"afr-dirty-xattr"},
- .type = GF_OPTION_TYPE_STR,
- .default_value = AFR_DIRTY_DEFAULT,
- },
- { .key = {"afr-pending-xattr"},
- .type = GF_OPTION_TYPE_STR,
- .description = "Comma seperated list of xattrs that are used to "
- "capture information on pending heals."
- },
- { .key = {"metadata-splitbrain-forced-heal"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "off",
- },
- { .key = {"heal-timeout"},
- .type = GF_OPTION_TYPE_INT,
- .min = 60,
- .max = INT_MAX,
- .default_value = "600",
- .description = "time interval for checking the need to self-heal "
- "in self-heal-daemon"
- },
- { .key = {"consistent-metadata"},
- .type = GF_OPTION_TYPE_BOOL,
- .default_value = "no",
- .description = "If this option is enabled, readdirp will force "
- "lookups on those entries read whose read child is "
- "not the same as that of the parent. This will "
- "guarantee that all read operations on a file serve "
- "attributes from the same subvol as long as it holds "
- " a good copy of the file/dir.",
- },
- { .key = {"arbiter-count"},
- .type = GF_OPTION_TYPE_INT,
- .description = "subset of child_count. Has to be 0 or 1."
- },
- { .key = {"shd-max-threads"},
- .type = GF_OPTION_TYPE_INT,
- .min = 1,
- .max = 64,
- .default_value = "1",
- .description = "Maximum number of threads SHD can use per local "
- "brick. This can substantially lower heal times, "
- "but can also crush your bricks if you don't have "
- "the storage hardware to support this."
- },
- { .key = {"shd-wait-qlength"},
- .type = GF_OPTION_TYPE_INT,
- .min = 1,
- .max = 655536,
- .default_value = "1024",
- .description = "This option can be used to control number of heals"
- " that can wait in SHD per subvolume",
- },
- { .key = {NULL} },
+ {.key = {"read-subvolume"},
+ .type = GF_OPTION_TYPE_XLATOR,
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "inode-read fops happen only on one of the bricks in "
+ "replicate. Afr will prefer the one specified using "
+ "this option if it is not stale. Option value must be "
+ "one of the xlator names of the children. "
+ "Ex: <volname>-client-0 till "
+ "<volname>-client-<number-of-bricks - 1>"},
+ {.key = {"read-subvolume-index"},
+ .type = GF_OPTION_TYPE_INT,
+ .default_value = "-1",
+ .op_version = {2},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "inode-read fops happen only on one of the bricks in "
+ "replicate. AFR will prefer the one specified using "
+ "this option if it is not stale. allowed options"
+ " include -1 till replica-count - 1"},
+ {.key = {"read-hash-mode"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0,
+ .max = 5,
+ .default_value = "1",
+ .op_version = {2},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description =
+ "inode-read fops happen only on one of the bricks in "
+ "replicate. AFR will prefer the one computed using "
+ "the method specified using this option.\n"
+ "0 = first readable child of AFR, starting from 1st child.\n"
+ "1 = hash by GFID of file (all clients use "
+ "same subvolume).\n"
+ "2 = hash by GFID of file and client PID.\n"
+ "3 = brick having the least outstanding read requests.\n"
+ "4 = brick having the least network ping latency.\n"
+ "5 = Hybrid mode between 3 and 4, ie least value among "
+ "network-latency multiplied by outstanding-read-requests."},
+ {
+ .key = {"choose-local"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "true",
+ .op_version = {2},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "Choose a local subvolume (i.e. Brick) to read from"
+ " if read-subvolume is not explicitly set.",
+ },
+ {.key = {"background-self-heal-count"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0,
+ .max = 256,
+ .default_value = "8",
+ .validate = GF_OPT_VALIDATE_MIN,
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This specifies the number of per client self-heal "
+ "jobs that can perform parallel heals in the "
+ "background."},
+ {.key = {"halo-shd-max-latency"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = 99999,
+ .default_value = "99999",
+ .op_version = {GD_OP_VERSION_3_11_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate", "halo"},
+ .description = "Maximum latency for shd halo replication in msec."},
+ {.key = {"halo-enabled"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "False",
+ .op_version = {GD_OP_VERSION_3_11_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate", "halo"},
+ .description = "Enable Halo (geo) replication mode."},
+ {.key = {"halo-nfsd-max-latency"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = 99999,
+ .default_value = "5",
+ .op_version = {GD_OP_VERSION_3_11_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate", "halo"},
+ .description = "Maximum latency for nfsd halo replication in msec."},
+ {.key = {"halo-max-latency"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = AFR_HALO_MAX_LATENCY,
+ .default_value = "5",
+ .op_version = {GD_OP_VERSION_3_11_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate", "halo"},
+ .description = "Maximum latency for halo replication in msec."},
+ {.key = {"halo-max-replicas"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = 99999,
+ .default_value = "99999",
+ .op_version = {GD_OP_VERSION_3_11_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate", "halo"},
+ .description = "The maximum number of halo replicas; replicas"
+ " beyond this value will be written asynchronously"
+ "via the SHD."},
+ {.key = {"halo-min-replicas"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = 99999,
+ .default_value = "2",
+ .op_version = {GD_OP_VERSION_3_11_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate", "halo"},
+ .description = "The minimmum number of halo replicas, before adding "
+ "out of region replicas."},
+ {.key = {"heal-wait-queue-length"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0,
+ .max = 10000, /*Around 100MB with sizeof(afr_local_t)= 10496 bytes*/
+ .default_value = "128",
+ .validate = GF_OPT_VALIDATE_MIN,
+ .op_version = {GD_OP_VERSION_3_7_10},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This specifies the number of heals that can be queued"
+ " for the parallel background self heal jobs."},
+ {.key = {"data-self-heal"},
+ .type = GF_OPTION_TYPE_STR,
+ .value = {"1", "on", "yes", "true", "enable", "0", "off", "no", "false",
+ "disable", "open"},
+ .default_value = "off",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "Using this option we can enable/disable data "
+ "self-heal on the file. \"open\" means data "
+ "self-heal action will only be triggered by file "
+ "open operations."},
+ {.key = {"data-self-heal-algorithm"},
+ .type = GF_OPTION_TYPE_STR,
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "Select between \"full\", \"diff\". The "
+ "\"full\" algorithm copies the entire file from "
+ "source to sink. The \"diff\" algorithm copies to "
+ "sink only those blocks whose checksums don't match "
+ "with those of source. If no option is configured "
+ "the option is chosen dynamically as follows: "
+ "If the file does not exist on one of the sinks "
+ "or empty file exists or if the source file size is "
+ "about the same as page size the entire file will "
+ "be read and written i.e \"full\" algo, "
+ "otherwise \"diff\" algo is chosen.",
+ .value = {"diff", "full"}},
+ {.key = {"data-self-heal-window-size"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = 1024,
+ .default_value = "1",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "Maximum number blocks per file for which self-heal "
+ "process would be applied simultaneously."},
+ {.key = {"metadata-self-heal"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ /*.validate_fn = validate_replica*/
+ .description = "Using this option we can enable/disable metadata "
+ "i.e. Permissions, ownerships, xattrs self-heal on "
+ "the file/directory."},
+ {.key = {"entry-self-heal"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ /*.validate_fn = validate_replica*/
+ .description = "Using this option we can enable/disable entry "
+ "self-heal on the directory."},
+ {.key = {"data-change-log"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This option exists only for backward compatibility "
+ "and configuring it doesn't have any effect"},
+ {.key = {"metadata-change-log"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This option exists only for backward compatibility "
+ "and configuring it doesn't have any effect"},
+ {.key = {"entry-change-log"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This option exists only for backward compatibility "
+ "and configuring it doesn't have any effect"},
+ {.key = {"optimistic-change-log"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .description = "Entry/Metadata fops will not perform "
+ "pre fop changelog operations in afr transaction "
+ "if this option is enabled."},
+ {.key = {"inodelk-trace"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "Enabling this option logs inode lock/unlocks"},
+ {.key = {"entrylk-trace"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "Enabling this option logs entry lock/unlocks"},
+ {.key = {"pre-op-compat"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .description = "Use separate pre-op xattrop() FOP rather than "
+ "overloading xdata of the OP"},
+ {.key = {"eager-lock"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description =
+ "Enable/Disable eager lock for replica volume. "
+ "Lock phase of a transaction has two sub-phases. "
+ "First is an attempt to acquire locks in parallel by "
+ "broadcasting non-blocking lock requests. If lock "
+ "acquisition fails on any server, then the held locks "
+ "are unlocked and we revert to a blocking locks mode "
+ "sequentially on one server after another. If this "
+ "option is enabled the initial broadcasting lock "
+ "request attempts to acquire a full lock on the entire file. "
+ "If this fails, we revert back to the sequential "
+ "\"regional\" blocking locks as before. In the case "
+ "where such an \"eager\" lock is granted in the "
+ "non-blocking phase, it gives rise to an opportunity "
+ "for optimization. i.e, if the next write transaction "
+ "on the same FD arrives before the unlock phase of "
+ "the first transaction, it \"takes over\" the full "
+ "file lock. Similarly if yet another data transaction "
+ "arrives before the unlock phase of the \"optimized\" "
+ "transaction, that in turn \"takes over\" the lock as "
+ "well. The actual unlock now happens at the end of "
+ "the last \"optimized\" transaction."
+
+ },
+ {.key = {"self-heal-daemon"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE,
+ .tags = {"replicate"},
+ /*.validate_fn = validate_replica_heal_enable_disable*/
+ .description = "This option applies to only self-heal-daemon. "
+ "Index directory crawl and automatic healing of files "
+ "will not be performed if this option is turned off."},
+ {.key = {"iam-self-heal-daemon"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "This option differentiates if the replicate "
+ "translator is running as part of self-heal-daemon "
+ "or not."},
+ {.key = {"iam-nfs-daemon"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "This option differentiates if the replicate "
+ "translator is running as part of an NFS daemon "
+ "or not."},
+ {
+ .key = {"quorum-type"},
+ .type = GF_OPTION_TYPE_STR,
+ .value = {"none", "auto", "fixed"},
+ .default_value = "none",
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ /*.option = quorum-type*/
+ .description = "If value is \"fixed\" only allow writes if "
+ "quorum-count bricks are present. If value is "
+ "\"auto\" only allow writes if more than half of "
+ "bricks, or exactly half including the first, are "
+ "present.",
+ },
+ {
+ .key = {"quorum-count"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = INT_MAX,
+ .default_value = 0,
+ .op_version = {1},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ /*.option = quorum-count*/
+ /*.validate_fn = validate_quorum_count*/
+ .description = "If quorum-type is \"fixed\" only allow writes if "
+ "this many bricks are present. Other quorum types "
+ "will OVERWRITE this value.",
+ },
+ {
+ .key = {"quorum-reads"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "no",
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This option has been removed. Reads are not allowed "
+ "if quorum is not met.",
+ },
+ {
+ .key = {"node-uuid"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Local glusterd uuid string, used in starting "
+ "self-heal-daemon so that it can crawl only on "
+ "local index directories.",
+ },
+ {
+ .key = {"post-op-delay-secs"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0,
+ .max = INT_MAX,
+ .default_value = "1",
+ .op_version = {2},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "Time interval induced artificially before "
+ "post-operation phase of the transaction to "
+ "enhance overlap of adjacent write operations.",
+ },
+ {
+ .key = {AFR_SH_READDIR_SIZE_KEY},
+ .type = GF_OPTION_TYPE_SIZET,
+ .description = "readdirp size for performing entry self-heal",
+ .min = 1024,
+ .max = 131072,
+ .op_version = {2},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE,
+ .tags = {"replicate"},
+ .default_value = "1KB",
+ },
+ {
+ .key = {"ensure-durability"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .op_version = {3},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "Afr performs fsyncs for transactions if this "
+ "option is on to make sure the changelogs/data is "
+ "written to the disk",
+ .default_value = "on",
+ },
+ {
+ .key = {"afr-dirty-xattr"},
+ .type = GF_OPTION_TYPE_STR,
+ .default_value = AFR_DIRTY_DEFAULT,
+ },
+ {.key = {"afr-pending-xattr"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Comma separated list of xattrs that are used to "
+ "capture information on pending heals."},
+ {
+ .key = {"metadata-splitbrain-forced-heal"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ },
+ {.key = {"heal-timeout"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 5,
+ .max = INT_MAX,
+ .default_value = "600",
+ .op_version = {2},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "time interval for checking the need to self-heal "
+ "in self-heal-daemon"},
+ {
+ .key = {"consistent-metadata"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "no",
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "If this option is enabled, readdirp will force "
+ "lookups on those entries read whose read child is "
+ "not the same as that of the parent. This will "
+ "guarantee that all read operations on a file serve "
+ "attributes from the same subvol as long as it holds "
+ " a good copy of the file/dir.",
+ },
+ {.key = {"arbiter-count"},
+ .type = GF_OPTION_TYPE_INT,
+ .description = "subset of child_count. Has to be 0 or 1."},
+ {
+ .key = {"thin-arbiter"},
+ .type = GF_OPTION_TYPE_STR,
+ .op_version = {GD_OP_VERSION_4_1_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .tags = {"replicate"},
+ .description = "contains host:path of thin abriter brick",
+ },
+ {.key = {"shd-max-threads"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = 64,
+ .default_value = "1",
+ .op_version = {GD_OP_VERSION_3_7_12},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "Maximum number of parallel heals SHD can do per "
+ "local brick. This can substantially lower heal times"
+ ", but can also crush your bricks if you don't have "
+ "the storage hardware to support this."},
+ {
+ .key = {"shd-wait-qlength"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 1,
+ .max = 655536,
+ .default_value = "1024",
+ .op_version = {GD_OP_VERSION_3_7_12},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This option can be used to control number of heals"
+ " that can wait in SHD per subvolume",
+ },
+ {
+ .key = {"locking-scheme"},
+ .type = GF_OPTION_TYPE_STR,
+ .value = {"full", "granular"},
+ .default_value = "full",
+ .op_version = {GD_OP_VERSION_3_7_12},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "If this option is set to granular, self-heal will "
+ "stop being compatible with afr-v1, which helps afr "
+ "be more granular while self-healing",
+ },
+ {.key = {"full-lock"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "yes",
+ .op_version = {GD_OP_VERSION_3_13_2},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE,
+ .tags = {"replicate"},
+ .description = "If this option is disabled, then the IOs will take "
+ "range locks same as versions till 3.13.1."},
+ {
+ .key = {"granular-entry-heal"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "no",
+ .op_version = {GD_OP_VERSION_3_8_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "If this option is enabled, self-heal will resort to "
+ "granular way of recording changelogs and doing entry "
+ "self-heal.",
+ },
+ {
+ .key = {"favorite-child-policy"},
+ .type = GF_OPTION_TYPE_STR,
+ .value = {"none", "size", "ctime", "mtime", "majority"},
+ .default_value = "none",
+ .op_version = {GD_OP_VERSION_3_7_12},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This option can be used to automatically resolve "
+ "split-brains using various policies without user "
+ "intervention. \"size\" picks the file with the "
+ "biggest size as the source. \"ctime\" and \"mtime\" "
+ "pick the file with the latest ctime and mtime "
+ "respectively as the source. \"majority\" picks a file"
+ " with identical mtime and size in more than half the "
+ "number of bricks in the replica.",
+ },
+ {
+ .key = {"consistent-io"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "no",
+ .description = "If this option is enabled, i/o will fail even if "
+ "one of the bricks is down in the replicas",
+ },
+ {.key = {"use-compound-fops"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "no",
+ .op_version = {GD_OP_VERSION_3_8_4},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"replicate"},
+ .description = "This option exists only for backward compatibility "
+ "and configuring it doesn't have any effect"},
+ {.key = {"use-anonymous-inode"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "no",
+ .op_version = {GD_OP_VERSION_8_0},
+ .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE,
+ .tags = {"replicate"},
+ .description = "Setting this option heals directory renames efficiently"},
+
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .notify = notify,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .dumpops = &dumpops,
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "replicate",
+ .category = GF_MAINTAINED,
};
diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
index 7a99e70c80e..d62f9a9caf2 100644
--- a/xlators/cluster/afr/src/afr.h
+++ b/xlators/cluster/afr/src/afr.h
@@ -8,173 +8,292 @@
cases as published by the Free Software Foundation.
*/
-
#ifndef __AFR_H__
#define __AFR_H__
-#include "call-stub.h"
-#include "compat-errno.h"
+#include <glusterfs/call-stub.h>
+#include <glusterfs/compat-errno.h>
#include "afr-mem-types.h"
#include "libxlator.h"
-#include "timer.h"
-#include "syncop.h"
+#include <glusterfs/timer.h>
+#include <glusterfs/syncop.h>
#include "afr-self-heald.h"
#include "afr-messages.h"
-#define AFR_XATTR_PREFIX "trusted.afr"
+#define SHD_INODE_LRU_LIMIT 1
#define AFR_PATHINFO_HEADER "REPLICATE:"
#define AFR_SH_READDIR_SIZE_KEY "self-heal-readdir-size"
#define AFR_SH_DATA_DOMAIN_FMT "%s:self-heal"
#define AFR_DIRTY_DEFAULT AFR_XATTR_PREFIX ".dirty"
-#define AFR_DIRTY (((afr_private_t *) (THIS->private))->afr_dirty)
+#define AFR_DIRTY (((afr_private_t *)(THIS->private))->afr_dirty)
-#define AFR_LOCKEE_COUNT_MAX 3
-#define AFR_DOM_COUNT_MAX 3
-#define AFR_NUM_CHANGE_LOGS 3 /*data + metadata + entry*/
+#define AFR_LOCKEE_COUNT_MAX 3
+#define AFR_DOM_COUNT_MAX 3
+#define AFR_NUM_CHANGE_LOGS 3 /*data + metadata + entry*/
#define AFR_DEFAULT_SPB_CHOICE_TIMEOUT 300 /*in seconds*/
#define ARBITER_BRICK_INDEX 2
+#define THIN_ARBITER_BRICK_INDEX 2
+#define AFR_TA_DOM_NOTIFY "afr.ta.dom-notify"
+#define AFR_TA_DOM_MODIFY "afr.ta.dom-modify"
+
+#define AFR_LK_HEAL_DOM "afr.lock-heal.domain"
+
+#define AFR_HALO_MAX_LATENCY 99999
+#define AFR_ANON_DIR_PREFIX ".glusterfs-anonymous-inode"
+
+#define PFLAG_PENDING (1 << 0)
+#define PFLAG_SBRAIN (1 << 1)
+
+typedef int (*afr_lock_cbk_t)(call_frame_t *frame, xlator_t *this);
+
+typedef int (*afr_read_txn_wind_t)(call_frame_t *frame, xlator_t *this,
+ int subvol);
+
+typedef int (*afr_inode_refresh_cbk_t)(call_frame_t *frame, xlator_t *this,
+ int err);
+
+typedef int (*afr_changelog_resume_t)(call_frame_t *frame, xlator_t *this);
+
+#define AFR_COUNT(array, max) \
+ ({ \
+ int __i; \
+ int __res = 0; \
+ for (__i = 0; __i < max; __i++) \
+ if (array[__i]) \
+ __res++; \
+ __res; \
+ })
+#define AFR_INTERSECT(dst, src1, src2, max) \
+ ({ \
+ int __i; \
+ for (__i = 0; __i < max; __i++) \
+ dst[__i] = src1[__i] && src2[__i]; \
+ })
+#define AFR_CMP(a1, a2, len) \
+ ({ \
+ int __cmp = 0; \
+ int __i; \
+ for (__i = 0; __i < len; __i++) \
+ if (a1[__i] != a2[__i]) { \
+ __cmp = 1; \
+ break; \
+ } \
+ __cmp; \
+ })
+#define AFR_IS_ARBITER_BRICK(priv, index) \
+ ((priv->arbiter_count == 1) && (index == ARBITER_BRICK_INDEX))
+
+#define AFR_SET_ERROR_AND_CHECK_SPLIT_BRAIN(ret, errnum) \
+ do { \
+ local->op_ret = ret; \
+ local->op_errno = errnum; \
+ if (local->op_errno == EIO) \
+ gf_msg(this->name, GF_LOG_ERROR, local->op_errno, \
+ AFR_MSG_SPLIT_BRAIN, \
+ "Failing %s on gfid %s: " \
+ "split-brain observed.", \
+ gf_fop_list[local->op], uuid_utoa(local->inode->gfid)); \
+ } while (0)
+
+#define AFR_ERROR_OUT_IF_FDCTX_INVALID(__fd, __this, __error, __label) \
+ do { \
+ afr_fd_ctx_t *__fd_ctx = NULL; \
+ __fd_ctx = afr_fd_ctx_get(__fd, __this); \
+ if (__fd_ctx && __fd_ctx->is_fd_bad) { \
+ __error = EBADF; \
+ goto __label; \
+ } \
+ } while (0)
-typedef int (*afr_lock_cbk_t) (call_frame_t *frame, xlator_t *this);
-
-typedef int (*afr_read_txn_wind_t) (call_frame_t *frame, xlator_t *this, int subvol);
-
-typedef int (*afr_inode_refresh_cbk_t) (call_frame_t *frame, xlator_t *this, int err);
-
-typedef int (*afr_changelog_resume_t) (call_frame_t *frame, xlator_t *this);
-
-#define alloca0(size) ({void *__ptr; __ptr = alloca(size); memset(__ptr, 0, size); __ptr;})
-#define AFR_COUNT(array,max) ({int __i; int __res = 0; for (__i = 0; __i < max; __i++) if (array[__i]) __res++; __res;})
-#define AFR_INTERSECT(dst,src1,src2,max) ({int __i; for (__i = 0; __i < max; __i++) dst[__i] = src1[__i] && src2[__i];})
-#define AFR_CMP(a1,a2,len) ({int __cmp = 0; int __i; for (__i = 0; __i < len; __i++) if (a1[__i] != a2[__i]) { __cmp = 1; break;} __cmp;})
-#define AFR_IS_ARBITER_BRICK(priv, index) ((priv->arbiter_count == 1) && (index == ARBITER_BRICK_INDEX))
-typedef struct _afr_private {
- gf_lock_t lock; /* to guard access to child_count, etc */
- unsigned int child_count; /* total number of children */
- unsigned int arbiter_count; /*subset of child_count.
- Has to be 0 or 1.*/
-
- xlator_t **children;
-
- inode_t *root_inode;
-
- unsigned char *child_up;
- unsigned char *local;
-
- char **pending_key;
-
- char *data_self_heal; /* on/off/open */
- char * data_self_heal_algorithm; /* name of algorithm */
- unsigned int data_self_heal_window_size; /* max number of pipelined
- read/writes */
-
- struct list_head heal_waiting; /*queue for files that need heal*/
- uint32_t heal_wait_qlen; /*configurable queue length for heal_waiting*/
- int32_t heal_waiters; /* No. of elements currently in wait queue.*/
-
- struct list_head healing;/* queue for files that are undergoing
- background heal*/
- uint32_t background_self_heal_count;/*configurable queue length for
- healing queue*/
- int32_t healers;/* No. of elements currently undergoing background
- heal*/
-
- gf_boolean_t metadata_self_heal; /* on/off */
- gf_boolean_t entry_self_heal; /* on/off */
-
- gf_boolean_t data_change_log; /* on/off */
- gf_boolean_t metadata_change_log; /* on/off */
- gf_boolean_t entry_change_log; /* on/off */
-
- gf_boolean_t metadata_splitbrain_forced_heal; /* on/off */
- int read_child; /* read-subvolume */
- unsigned int hash_mode; /* for when read_child is not set */
- int favorite_child; /* subvolume to be preferred in resolving
- split-brain cases */
-
- gf_boolean_t inodelk_trace;
- gf_boolean_t entrylk_trace;
-
- unsigned int wait_count; /* # of servers to wait for success */
-
- uint64_t up_count; /* number of CHILD_UPs we have seen */
- uint64_t down_count; /* number of CHILD_DOWNs we have seen */
-
- gf_boolean_t optimistic_change_log;
- gf_boolean_t eager_lock;
- gf_boolean_t pre_op_compat; /* on/off */
- uint32_t post_op_delay_secs;
- unsigned int quorum_count;
- gf_boolean_t quorum_reads;
-
- char vol_uuid[UUID_SIZE + 1];
- int32_t *last_event;
-
- /* @event_generation: Keeps count of number of events received which can
- potentially impact consistency decisions. The events are CHILD_UP
- and CHILD_DOWN, when we have to recalculate the freshness/staleness
- of copies to detect if changes had happened while the other server
- was down. CHILD_DOWN and CHILD_UP can also be received on network
- disconnect/reconnects and not necessarily server going down/up.
- Recalculating freshness/staleness on network events is equally
- important as we might have had a network split brain.
- */
- uint32_t event_generation;
-
- gf_boolean_t choose_local;
- gf_boolean_t did_discovery;
- uint64_t sh_readdir_size;
- gf_boolean_t ensure_durability;
- char *sh_domain;
- char *afr_dirty;
-
- afr_self_heald_t shd;
-
- gf_boolean_t consistent_metadata;
- uint64_t spb_choice_timeout;
- gf_boolean_t need_heal;
-
- /* pump dependencies */
- void *pump_private;
- gf_boolean_t use_afr_in_pump;
-} afr_private_t;
-
+typedef enum {
+ AFR_READ_POLICY_FIRST_UP,
+ AFR_READ_POLICY_GFID_HASH,
+ AFR_READ_POLICY_GFID_PID_HASH,
+ AFR_READ_POLICY_LESS_LOAD,
+ AFR_READ_POLICY_LEAST_LATENCY,
+ AFR_READ_POLICY_LOAD_LATENCY_HYBRID,
+} afr_read_hash_mode_t;
typedef enum {
- AFR_DATA_TRANSACTION, /* truncate, write, ... */
- AFR_METADATA_TRANSACTION, /* chmod, chown, ... */
- AFR_ENTRY_TRANSACTION, /* create, rmdir, ... */
- AFR_ENTRY_RENAME_TRANSACTION, /* rename */
-} afr_transaction_type;
+ AFR_FAV_CHILD_NONE,
+ AFR_FAV_CHILD_BY_SIZE,
+ AFR_FAV_CHILD_BY_CTIME,
+ AFR_FAV_CHILD_BY_MTIME,
+ AFR_FAV_CHILD_BY_MAJORITY,
+ AFR_FAV_CHILD_POLICY_MAX,
+} afr_favorite_child_policy;
typedef enum {
- AFR_TRANSACTION_LK,
- AFR_SELFHEAL_LK,
-} transaction_lk_type_t;
+ AFR_SELFHEAL_DATA_FULL = 0,
+ AFR_SELFHEAL_DATA_DIFF,
+ AFR_SELFHEAL_DATA_DYNAMIC,
+} afr_data_self_heal_type_t;
typedef enum {
- AFR_LOCK_OP,
- AFR_UNLOCK_OP,
-} afr_lock_op_type_t;
+ AFR_CHILD_UNKNOWN = -1,
+ AFR_CHILD_ZERO,
+ AFR_CHILD_ONE,
+ AFR_CHILD_THIN_ARBITER,
+} afr_child_index;
typedef enum {
- AFR_DATA_SELF_HEAL_LK,
- AFR_METADATA_SELF_HEAL_LK,
- AFR_ENTRY_SELF_HEAL_LK,
-}selfheal_lk_type_t;
+ TA_WAIT_FOR_NOTIFY_LOCK_REL, /*FOP came after notify domain lock upcall
+ notification and waiting for its release.*/
+ TA_GET_INFO_FROM_TA_FILE, /*FOP needs post-op on ta file to get
+ *info about which brick is bad.*/
+ TA_INFO_IN_MEMORY_SUCCESS, /*Bad brick info is in memory and fop failed
+ *on BAD brick - Success*/
+ TA_INFO_IN_MEMORY_FAILED, /*Bad brick info is in memory and fop failed
+ *on GOOD brick - Failed*/
+ TA_SUCCESS, /*FOP succeeded on both data bricks.*/
+} afr_ta_fop_state_t;
+
+struct afr_nfsd {
+ uint32_t halo_max_latency_msec;
+ gf_boolean_t iamnfsd;
+};
+
+typedef struct _afr_lk_heal_info {
+ fd_t *fd;
+ int32_t cmd;
+ struct gf_flock flock;
+ dict_t *xdata_req;
+ unsigned char *locked_nodes;
+ struct list_head pos;
+ gf_lkowner_t lk_owner;
+ pid_t pid;
+ int32_t *child_up_event_gen;
+ int32_t *child_down_event_gen;
+} afr_lk_heal_info_t;
+
+typedef struct _afr_private {
+ gf_lock_t lock; /* to guard access to child_count, etc */
+ unsigned int child_count; /* total number of children */
+ unsigned int arbiter_count; /*subset of child_count.
+ Has to be 0 or 1.*/
+
+ xlator_t **children;
+
+ inode_t *root_inode;
+
+ int favorite_child; /* subvolume to be preferred in resolving
+ split-brain cases */
+ /* For thin-arbiter. */
+ uuid_t ta_gfid;
+ unsigned int thin_arbiter_count; /* 0 or 1 at the moment.*/
+ int ta_bad_child_index;
+ int ta_event_gen;
+ unsigned int ta_in_mem_txn_count;
+ unsigned int ta_on_wire_txn_count;
+ struct list_head ta_waitq;
+ struct list_head ta_onwireq;
+
+ unsigned char *anon_inode;
+ unsigned char *child_up;
+ unsigned char *halo_child_up;
+ int64_t *child_latency;
+ unsigned char *local;
+
+ char **pending_key;
+
+ afr_data_self_heal_type_t data_self_heal_algorithm;
+ unsigned int data_self_heal_window_size; /* max number of pipelined
+ read/writes */
+
+ struct list_head heal_waiting; /*queue for files that need heal*/
+ uint32_t heal_wait_qlen; /*configurable queue length for heal_waiting*/
+ int32_t heal_waiters; /* No. of elements currently in wait queue.*/
+
+ struct list_head healing; /* queue for files that are undergoing
+ background heal*/
+ uint32_t background_self_heal_count; /*configurable queue length for
+ healing queue*/
+ int32_t healers; /* No. of elements currently undergoing background
+ heal*/
+
+ gf_boolean_t release_ta_notify_dom_lock;
+
+ gf_boolean_t metadata_self_heal; /* on/off */
+ gf_boolean_t entry_self_heal; /* on/off */
+
+ gf_boolean_t metadata_splitbrain_forced_heal; /* on/off */
+ int read_child; /* read-subvolume */
+ gf_atomic_t *pending_reads; /*No. of pending read cbks per child.*/
+
+ gf_timer_t *timer; /* launched when parent up is received */
+
+ unsigned int wait_count; /* # of servers to wait for success */
+
+ unsigned char ta_child_up;
+ gf_boolean_t optimistic_change_log;
+ gf_boolean_t eager_lock;
+ gf_boolean_t pre_op_compat; /* on/off */
+ uint32_t post_op_delay_secs;
+ unsigned int quorum_count;
+
+ off_t ta_notify_dom_lock_offset;
+ afr_favorite_child_policy fav_child_policy; /*Policy to use for automatic
+ resolution of split-brains.*/
+ afr_read_hash_mode_t hash_mode; /* for when read_child is not set */
+
+ int32_t *last_event;
+
+ /* @event_generation: Keeps count of number of events received which can
+ potentially impact consistency decisions. The events are CHILD_UP
+ and CHILD_DOWN, when we have to recalculate the freshness/staleness
+ of copies to detect if changes had happened while the other server
+ was down. CHILD_DOWN and CHILD_UP can also be received on network
+ disconnect/reconnects and not necessarily server going down/up.
+ Recalculating freshness/staleness on network events is equally
+ important as we might have had a network split brain.
+ */
+ uint32_t event_generation;
+ char vol_uuid[UUID_SIZE + 1];
+
+ gf_boolean_t choose_local;
+ gf_boolean_t did_discovery;
+ gf_boolean_t ensure_durability;
+ gf_boolean_t halo_enabled;
+ gf_boolean_t consistent_metadata;
+ gf_boolean_t need_heal;
+ gf_boolean_t granular_locks;
+ uint64_t sh_readdir_size;
+ char *sh_domain;
+ char *afr_dirty;
+
+ uint64_t spb_choice_timeout;
+
+ afr_self_heald_t shd;
+ struct afr_nfsd nfsd;
+
+ uint32_t halo_max_latency_msec;
+ uint32_t halo_max_replicas;
+ uint32_t halo_min_replicas;
+
+ gf_boolean_t full_lock;
+ gf_boolean_t esh_granular;
+ gf_boolean_t consistent_io;
+ gf_boolean_t data_self_heal; /* on/off */
+ gf_boolean_t use_anon_inode;
+
+ /*For lock healing.*/
+ struct list_head saved_locks;
+ struct list_head lk_healq;
+
+ /*For anon-inode handling */
+ char anon_inode_name[NAME_MAX + 1];
+ char anon_gfid_str[UUID_SIZE + 1];
+} afr_private_t;
typedef enum {
- AFR_INODELK_TRANSACTION,
- AFR_INODELK_NB_TRANSACTION,
- AFR_ENTRYLK_TRANSACTION,
- AFR_ENTRYLK_NB_TRANSACTION,
- AFR_INODELK_SELFHEAL,
- AFR_INODELK_NB_SELFHEAL,
- AFR_ENTRYLK_SELFHEAL,
- AFR_ENTRYLK_NB_SELFHEAL,
-} afr_lock_call_type_t;
+ AFR_DATA_TRANSACTION, /* truncate, write, ... */
+ AFR_METADATA_TRANSACTION, /* chmod, chown, ... */
+ AFR_ENTRY_TRANSACTION, /* create, rmdir, ... */
+ AFR_ENTRY_RENAME_TRANSACTION, /* rename */
+} afr_transaction_type;
/*
xattr format: trusted.afr.volume = [x y z]
@@ -184,867 +303,940 @@ typedef enum {
*/
static inline int
-afr_index_for_transaction_type (afr_transaction_type type)
+afr_index_for_transaction_type(afr_transaction_type type)
{
- switch (type) {
-
+ switch (type) {
case AFR_DATA_TRANSACTION:
- return 0;
+ return 0;
case AFR_METADATA_TRANSACTION:
- return 1;
+ return 1;
case AFR_ENTRY_TRANSACTION:
case AFR_ENTRY_RENAME_TRANSACTION:
- return 2;
- }
+ return 2;
+ }
- return -1; /* make gcc happy */
+ return -1; /* make gcc happy */
}
static inline int
-afr_index_from_ia_type (ia_type_t type)
+afr_index_from_ia_type(ia_type_t type)
{
- switch (type) {
+ switch (type) {
case IA_IFDIR:
- return afr_index_for_transaction_type (AFR_ENTRY_TRANSACTION);
+ return afr_index_for_transaction_type(AFR_ENTRY_TRANSACTION);
case IA_IFREG:
- return afr_index_for_transaction_type (AFR_DATA_TRANSACTION);
- default: return -1;
- }
+ return afr_index_for_transaction_type(AFR_DATA_TRANSACTION);
+ default:
+ return -1;
+ }
}
typedef struct {
- loc_t loc;
- char *basename;
- unsigned char *locked_nodes;
- int locked_count;
+ struct gf_flock flock;
+ loc_t loc;
+ fd_t *fd;
+ char *basename;
+ unsigned char *locked_nodes;
+ int locked_count;
-} afr_entry_lockee_t;
+} afr_lockee_t;
int
-afr_entry_lockee_cmp (const void *l1, const void *l2);
+afr_entry_lockee_cmp(const void *l1, const void *l2);
typedef struct {
- char *domain; /* Domain on which inodelk is taken */
- struct gf_flock flock;
- unsigned char *locked_nodes;
- int32_t lock_count;
-} afr_inodelk_t;
+ loc_t *lk_loc;
-typedef struct {
- loc_t *lk_loc;
+ afr_lockee_t lockee[AFR_LOCKEE_COUNT_MAX];
- int lockee_count;
- afr_entry_lockee_t lockee[AFR_LOCKEE_COUNT_MAX];
+ const char *lk_basename;
+ const char *lower_basename;
+ const char *higher_basename;
- afr_inodelk_t inodelk[AFR_DOM_COUNT_MAX];
- const char *lk_basename;
- const char *lower_basename;
- const char *higher_basename;
- char lower_locked;
- char higher_locked;
+ unsigned char *lower_locked_nodes;
- unsigned char *locked_nodes;
- unsigned char *lower_locked_nodes;
+ afr_lock_cbk_t lock_cbk;
- selfheal_lk_type_t selfheal_lk_type;
- transaction_lk_type_t transaction_lk_type;
+ int lockee_count;
- int32_t lock_count;
- int32_t entrylk_lock_count;
+ int32_t lk_call_count;
+ int32_t lk_expected_count;
+ int32_t lk_attempted_count;
- uint64_t lock_number;
- int32_t lk_call_count;
- int32_t lk_expected_count;
- int32_t lk_attempted_count;
-
- int32_t lock_op_ret;
- int32_t lock_op_errno;
- afr_lock_cbk_t lock_cbk;
- char *domain; /* Domain on which inode/entry lock/unlock in progress.*/
+ int32_t lock_op_ret;
+ int32_t lock_op_errno;
+ char *domain; /* Domain on which inode/entry lock/unlock in progress.*/
+ int32_t lock_count;
+ char lower_locked;
+ char higher_locked;
} afr_internal_lock_t;
struct afr_reply {
- int valid;
- int32_t op_ret;
- int32_t op_errno;
- dict_t *xattr;/*For xattrop*/
- dict_t *xdata;
- struct iatt poststat;
- struct iatt postparent;
- struct iatt prestat;
- struct iatt preparent;
- struct iatt preparent2;
- struct iatt postparent2;
- /* For rchecksum */
- uint8_t checksum[MD5_DIGEST_LENGTH];
- gf_boolean_t buf_has_zeroes;
- /* For lookup */
- int8_t need_heal;
+ int valid;
+ int32_t op_ret;
+ dict_t *xattr; /*For xattrop*/
+ dict_t *xdata;
+ struct iatt poststat;
+ struct iatt postparent;
+ struct iatt prestat;
+ struct iatt preparent;
+ struct iatt preparent2;
+ struct iatt postparent2;
+ int32_t op_errno;
+ /* For rchecksum */
+ uint8_t checksum[SHA256_DIGEST_LENGTH];
+ gf_boolean_t buf_has_zeroes;
+ gf_boolean_t fips_mode_rchecksum;
+ /* For lookup */
+ int8_t need_heal;
};
typedef enum {
- AFR_FD_NOT_OPENED,
- AFR_FD_OPENED,
- AFR_FD_OPENING
+ AFR_FD_NOT_OPENED,
+ AFR_FD_OPENED,
+ AFR_FD_OPENING
} afr_fd_open_status_t;
typedef struct {
- unsigned int *pre_op_done[AFR_NUM_CHANGE_LOGS];
- int inherited[AFR_NUM_CHANGE_LOGS];
- int on_disk[AFR_NUM_CHANGE_LOGS];
- afr_fd_open_status_t *opened_on; /* which subvolumes the fd is open on */
-
- unsigned int *lock_piggyback;
- unsigned int *lock_acquired;
-
- int flags;
-
- /* used for delayed-post-op optimization */
- pthread_mutex_t delay_lock;
- gf_timer_t *delay_timer;
- call_frame_t *delay_frame;
-
- /* set if any write on this fd was a non stable write
- (i.e, without O_SYNC or O_DSYNC)
- */
- gf_boolean_t witnessed_unstable_write;
-
- /* @open_fd_count:
- Number of open FDs queried from the server, as queried through
- xdata in FOPs. Currently, used to decide if eager-locking must be
- temporarily disabled.
- */
- uint32_t open_fd_count;
-
-
- /* list of frames currently in progress */
- struct list_head eager_locked;
-
- /* the subvolume on which the latest sequence of readdirs (starting
- at offset 0) has begun. Till the next readdir request with 0 offset
- arrives, we continue to read off this subvol.
- */
- int readdir_subvol;
+ afr_fd_open_status_t *opened_on; /* which subvolumes the fd is open on */
+ int flags;
+
+ /* the subvolume on which the latest sequence of readdirs (starting
+ at offset 0) has begun. Till the next readdir request with 0 offset
+ arrives, we continue to read off this subvol.
+ */
+ int readdir_subvol;
+ /* lock-healing related members. */
+ gf_boolean_t is_fd_bad;
+ afr_lk_heal_info_t *lk_heal_info;
+
} afr_fd_ctx_t;
+typedef enum {
+ AFR_FOP_LOCK_PARALLEL,
+ AFR_FOP_LOCK_SERIAL,
+ AFR_FOP_LOCK_QUORUM_FAILED,
+} afr_fop_lock_state_t;
+
+typedef struct _afr_inode_lock_t {
+ /* @num_inodelks:
+ Number of inodelks queried from the server, as queried through
+ xdata in FOPs. Currently, used to decide if eager-locking must be
+ temporarily disabled.
+ */
+ int32_t num_inodelks;
+ unsigned int event_generation;
+ gf_timer_t *delay_timer;
+ struct list_head owners; /*Transactions that are performing fop*/
+ struct list_head post_op; /*Transactions that are done with the fop
+ *So can not conflict with the fops*/
+ struct list_head waiting; /*Transaction that are waiting for
+ *conflicting transactions to complete*/
+ struct list_head frozen; /*Transactions that need to go as part of
+ * next batch of eager-lock*/
+ gf_boolean_t release;
+ gf_boolean_t acquired;
+} afr_lock_t;
+
+typedef struct _afr_inode_ctx {
+ uint64_t read_subvol;
+ uint64_t write_subvol;
+ int lock_count;
+ int spb_choice;
+ gf_timer_t *timer;
+ unsigned int *pre_op_done[AFR_NUM_CHANGE_LOGS];
+ int inherited[AFR_NUM_CHANGE_LOGS];
+ int on_disk[AFR_NUM_CHANGE_LOGS];
+ /*Only 2 types of transactions support eager-locks now. DATA/METADATA*/
+ afr_lock_t lock[2];
+
+ /* @open_fd_count:
+ Number of open FDs queried from the server, as queried through
+ xdata in FOPs. Currently, used to decide if eager-locking must be
+ temporarily disabled.
+ */
+ uint32_t open_fd_count;
+ gf_boolean_t need_refresh;
+
+ /* set if any write on this fd was a non stable write
+ (i.e, without O_SYNC or O_DSYNC)
+ */
+ gf_boolean_t witnessed_unstable_write;
+} afr_inode_ctx_t;
typedef struct _afr_local {
- glusterfs_fop_t op;
- unsigned int call_count;
+ glusterfs_fop_t op;
+ unsigned int call_count;
- /* @event_generation: copy of priv->event_generation taken at the
- time of starting the transaction. The copy is made so that we
- have a stable value through the various phases of the transaction.
- */
- unsigned int event_generation;
+ /* @event_generation: copy of priv->event_generation taken at the
+ time of starting the transaction. The copy is made so that we
+ have a stable value through the various phases of the transaction.
+ */
+ unsigned int event_generation;
- uint32_t open_fd_count;
- gf_boolean_t update_open_fd_count;
+ uint32_t open_fd_count;
+ int32_t num_inodelks;
- gf_lkowner_t saved_lk_owner;
+ int32_t op_ret;
+ int32_t op_errno;
- int32_t op_ret;
- int32_t op_errno;
+ int dirty[AFR_NUM_CHANGE_LOGS];
- int32_t **pending;
+ int32_t **pending;
- int dirty[AFR_NUM_CHANGE_LOGS];
+ loc_t loc;
+ loc_t newloc;
- loc_t loc;
- loc_t newloc;
+ fd_t *fd;
+ afr_fd_ctx_t *fd_ctx;
- fd_t *fd;
- afr_fd_ctx_t *fd_ctx;
+ /* @child_up: copy of priv->child_up taken at the time of transaction
+ start. The copy is taken so that we have a stable child_up array
+ through the phases of the transaction as priv->child_up[i] can keep
+ changing through time.
+ */
+ unsigned char *child_up;
- /* @child_up: copy of priv->child_up taken at the time of transaction
- start. The copy is taken so that we have a stable child_up array
- through the phases of the transaction as priv->child_up[i] can keep
- changing through time.
- */
- unsigned char *child_up;
+ /* @read_attempted:
+ array of flags representing subvolumes where read operations of
+ the read transaction have already been attempted. The array is
+ first pre-filled with down subvolumes, and as reads are performed
+ on other subvolumes, those are set as well. This way if the read
+ operation fails we do not retry on that subvolume again.
+ */
+ unsigned char *read_attempted;
- /* @read_attempted:
- array of flags representing subvolumes where read operations of
- the read transaction have already been attempted. The array is
- first pre-filled with down subvolumes, and as reads are performed
- on other subvolumes, those are set as well. This way if the read
- operation fails we do not retry on that subvolume again.
- */
- unsigned char *read_attempted;
+ /* @readfn:
- /* @readfn:
+ pointer to function which will perform the read operation on a given
+ subvolume. Used in read transactions.
+ */
- pointer to function which will perform the read operation on a given
- subvolume. Used in read transactions.
- */
+ afr_read_txn_wind_t readfn;
- afr_read_txn_wind_t readfn;
+ /* @inode:
- /* @refreshed:
+ the inode on which the read txn is performed on. ref'ed and copied
+ from either fd->inode or loc.inode
+ */
- the inode was "refreshed" (i.e, pending xattrs from all subvols
- freshly inspected and inode ctx updated accordingly) as part of
- this transaction already.
- */
- gf_boolean_t refreshed;
+ inode_t *inode;
- /* @inode:
+ /* @parent[2]:
- the inode on which the read txn is performed on. ref'ed and copied
- from either fd->inode or loc.inode
- */
+ parent inode[s] on which directory transactions are performed.
+ */
- inode_t *inode;
+ inode_t *parent;
+ inode_t *parent2;
- /* @parent[2]:
+ /* @readable:
- parent inode[s] on which directory transactions are performed.
- */
+ array of flags representing servers from which a read can be
+ performed. This is the output of afr_inode_refresh()
+ */
+ unsigned char *readable;
+ unsigned char *readable2; /*For rename transaction*/
- inode_t *parent;
- inode_t *parent2;
+ afr_inode_refresh_cbk_t refreshfn;
- /* @readable:
+ /* @refreshinode:
- array of flags representing servers from which a read can be
- performed. This is the output of afr_inode_refresh()
- */
- unsigned char *readable;
+ Inode currently getting refreshed.
+ */
+ inode_t *refreshinode;
- afr_inode_refresh_cbk_t refreshfn;
+ dict_t *xattr_req;
- /* @refreshinode:
+ dict_t *dict;
- Inode currently getting refreshed.
- */
- inode_t *refreshinode;
+ int read_subvol; /* Current read subvolume */
- /*
- @pre_op_compat:
+ int optimistic_change_log;
- compatibility mode of pre-op. send a separate pre-op and
- op operations as part of transaction, rather than combining
- */
+ afr_internal_lock_t internal_lock;
- gf_boolean_t pre_op_compat;
+ /*To handle setattr/setxattr on yet to be linked inode from dht*/
+ uuid_t refreshgfid;
- dict_t *xattr_req;
+ /* @refreshed:
- afr_internal_lock_t internal_lock;
+ the inode was "refreshed" (i.e, pending xattrs from all subvols
+ freshly inspected and inode ctx updated accordingly) as part of
+ this transaction already.
+ */
+ gf_boolean_t refreshed;
- dict_t *dict;
+ gf_boolean_t update_num_inodelks;
+ gf_boolean_t update_open_fd_count;
- int optimistic_change_log;
- gf_boolean_t delayed_post_op;
+ /*
+ @pre_op_compat:
- /* Is the current writev() going to perform a stable write?
- i.e, is fd->flags or @flags writev param have O_SYNC or
- O_DSYNC?
- */
- gf_boolean_t stable_write;
+ compatibility mode of pre-op. send a separate pre-op and
+ op operations as part of transaction, rather than combining
+ */
- /* This write appended to the file. Nnot necessarily O_APPEND,
- just means the offset of write was at the end of file.
- */
- gf_boolean_t append_write;
+ gf_boolean_t pre_op_compat;
- /*
- This struct contains the arguments for the "continuation"
- (scheme-like) of fops
- */
+ /* Is the current writev() going to perform a stable write?
+ i.e, is fd->flags or @flags writev param have O_SYNC or
+ O_DSYNC?
+ */
+ gf_boolean_t stable_write;
+ /* This write appended to the file. Nnot necessarily O_APPEND,
+ just means the offset of write was at the end of file.
+ */
+ gf_boolean_t append_write;
+
+ /*
+ This struct contains the arguments for the "continuation"
+ (scheme-like) of fops
+ */
+
+ struct {
struct {
- struct {
- gf_boolean_t needs_fresh_lookup;
- uuid_t gfid_req;
- } lookup;
-
- struct {
- unsigned char buf_set;
- struct statvfs buf;
- } statfs;
-
- struct {
- int32_t flags;
- } open;
-
- struct {
- int32_t cmd;
- struct gf_flock user_flock;
- struct gf_flock ret_flock;
- unsigned char *locked_nodes;
- } lk;
-
- /* inode read */
-
- struct {
- int32_t mask;
- int last_index; /* index of the child we tried previously */
- } access;
-
- struct {
- int last_index;
- } stat;
-
- struct {
- int last_index;
- } fstat;
-
- struct {
- size_t size;
- int last_index;
- } readlink;
-
- struct {
- char *name;
- int last_index;
- long xattr_len;
- } getxattr;
-
- struct {
- size_t size;
- off_t offset;
- int last_index;
- uint32_t flags;
- } readv;
-
- /* dir read */
-
- struct {
- int success_count;
- int32_t op_ret;
- int32_t op_errno;
-
- uint32_t *checksum;
- } opendir;
-
- struct {
- int32_t op_ret;
- int32_t op_errno;
- size_t size;
- off_t offset;
- dict_t *dict;
- gf_boolean_t failed;
- int last_index;
- } readdir;
- /* inode write */
-
- struct {
- struct iatt prebuf;
- struct iatt postbuf;
- } inode_wfop; //common structure for all inode-write-fops
-
- struct {
- int32_t op_ret;
-
- struct iovec *vector;
- struct iobref *iobref;
- int32_t count;
- off_t offset;
- uint32_t flags;
- } writev;
-
- struct {
- off_t offset;
- } truncate;
-
- struct {
- off_t offset;
- } ftruncate;
-
- struct {
- struct iatt in_buf;
- int32_t valid;
- } setattr;
-
- struct {
- struct iatt in_buf;
- int32_t valid;
- } fsetattr;
-
- struct {
- dict_t *dict;
- int32_t flags;
- } setxattr;
-
- struct {
- dict_t *dict;
- int32_t flags;
- } fsetxattr;
-
- struct {
- char *name;
- } removexattr;
-
- struct {
- dict_t *xattr;
- gf_xattrop_flags_t optype;
- } xattrop;
-
- /* dir write */
-
- struct {
- inode_t *inode;
- struct iatt buf;
- struct iatt preparent;
- struct iatt postparent;
- struct iatt prenewparent;
- struct iatt postnewparent;
- } dir_fop; //common structure for all dir fops
-
- struct {
- fd_t *fd;
- dict_t *params;
- int32_t flags;
- mode_t mode;
- } create;
-
- struct {
- dev_t dev;
- mode_t mode;
- dict_t *params;
- } mknod;
-
- struct {
- int32_t mode;
- dict_t *params;
- } mkdir;
-
- struct {
- int flags;
- } rmdir;
-
- struct {
- dict_t *params;
- char *linkpath;
- } symlink;
-
- struct {
- int32_t mode;
- off_t offset;
- size_t len;
- } fallocate;
-
- struct {
- off_t offset;
- size_t len;
- } discard;
-
- struct {
- off_t offset;
- off_t len;
- struct iatt prebuf;
- struct iatt postbuf;
- } zerofill;
-
- struct {
- char *volume;
- int32_t cmd;
- struct gf_flock flock;
- } inodelk;
-
- struct {
- off_t offset;
- gf_seek_what_t what;
- } seek;
-
- } cont;
+ struct statvfs buf;
+ unsigned char buf_set;
+ } statfs;
struct {
- off_t start, len;
+ fd_t *fd;
+ int32_t flags;
+ } open;
- gf_boolean_t eager_lock_on;
- int *eager_lock;
+ struct {
+ struct gf_flock user_flock;
+ struct gf_flock ret_flock;
+ unsigned char *locked_nodes;
+ int32_t cmd;
+ /*For lock healing only.*/
+ unsigned char *dom_locked_nodes;
+ int32_t *dom_lock_op_ret;
+ int32_t *dom_lock_op_errno;
+ struct gf_flock *getlk_rsp;
+ } lk;
+
+ /* inode read */
- char *basename;
- char *new_basename;
+ struct {
+ int32_t mask;
+ int last_index; /* index of the child we tried previously */
+ } access;
- loc_t parent_loc;
- loc_t new_parent_loc;
+ struct {
+ int last_index;
+ } stat;
- afr_transaction_type type;
+ struct {
+ int last_index;
+ } fstat;
- /* stub to resume on destruction
- of the transaction frame */
- call_stub_t *resume_stub;
+ struct {
+ size_t size;
+ int last_index;
+ } readlink;
- struct list_head eager_locked;
+ struct {
+ char *name;
+ long xattr_len;
+ int last_index;
+ } getxattr;
- unsigned char *pre_op;
+ struct {
+ size_t size;
+ off_t offset;
+ int last_index;
+ uint32_t flags;
+ } readv;
- /* For arbiter configuration only. */
- dict_t **pre_op_xdata;
- unsigned char *pre_op_sources;
+ /* dir read */
- /* @failed_subvols: subvolumes on which a pre-op or a
- FOP failed. */
- unsigned char *failed_subvols;
+ struct {
+ uint32_t *checksum;
+ int success_count;
+ int32_t op_ret;
+ int32_t op_errno;
+ } opendir;
- /* @dirtied: flag which indicates whether we set dirty flag
- in the OP. Typically true when we are performing operation
- on more than one subvol and optimistic changelog is disabled
+ struct {
+ int32_t op_ret;
+ int32_t op_errno;
+ size_t size;
+ off_t offset;
+ dict_t *dict;
+ int last_index;
+ gf_boolean_t failed;
+ } readdir;
+ /* inode write */
- A 'true' value set in @dirtied flag means an 'undirtying'
- has to be done in POST-OP phase.
- */
- gf_boolean_t dirtied;
+ struct {
+ struct iatt prebuf;
+ struct iatt postbuf;
+ } inode_wfop; // common structure for all inode-write-fops
- /* @inherited: flag which indicates that the dirty flags
- of the previous transaction were inherited
- */
- gf_boolean_t inherited;
+ struct {
+ struct iovec *vector;
+ struct iobref *iobref;
+ off_t offset;
+ int32_t op_ret;
+ int32_t count;
+ uint32_t flags;
+ } writev;
- /*
- @no_uninherit: flag which indicates that a pre_op_uninherit()
- must _not_ be attempted (and returned as failure) always. This
- flag is set when a hard pre-op is performed, but not accounted
- for it in fd_ctx->on_disk[]. Such transactions are "isolated"
- from the pre-op piggybacking entirely and therefore uninherit
- must not be attempted.
- */
- gf_boolean_t no_uninherit;
+ struct {
+ off_t offset;
+ } truncate;
- /* @uninherit_done:
- @uninherit_value:
+ struct {
+ off_t offset;
+ } ftruncate;
- The above pair variables make pre_op_uninherit() idempotent.
- Both are FALSE initially. The first call to pre_op_uninherit
- sets @uninherit_done to TRUE and the return value to
- @uninherit_value. Further calls will check for @uninherit_done
- to be TRUE and if so will simply return @uninherit_value.
- */
- gf_boolean_t uninherit_done;
- gf_boolean_t uninherit_value;
+ struct {
+ struct iatt in_buf;
+ int32_t valid;
+ } setattr;
- /* @changelog_resume: function to be called after changlogging
- (either pre-op or post-op) is done
- */
+ struct {
+ struct iatt in_buf;
+ int32_t valid;
+ } fsetattr;
- afr_changelog_resume_t changelog_resume;
+ struct {
+ dict_t *dict;
+ int32_t flags;
+ } setxattr;
- call_frame_t *main_frame;
+ struct {
+ dict_t *dict;
+ int32_t flags;
+ } fsetxattr;
- int (*wind) (call_frame_t *frame, xlator_t *this, int subvol);
+ struct {
+ char *name;
+ } removexattr;
- int (*fop) (call_frame_t *frame, xlator_t *this);
+ struct {
+ dict_t *xattr;
+ gf_xattrop_flags_t optype;
+ } xattrop;
- int (*done) (call_frame_t *frame, xlator_t *this);
+ /* dir write */
- int (*resume) (call_frame_t *frame, xlator_t *this);
+ struct {
+ inode_t *inode;
+ struct iatt buf;
+ struct iatt preparent;
+ struct iatt postparent;
+ struct iatt prenewparent;
+ struct iatt postnewparent;
+ } dir_fop; // common structure for all dir fops
- int (*unwind) (call_frame_t *frame, xlator_t *this);
+ struct {
+ fd_t *fd;
+ dict_t *params;
+ int32_t flags;
+ mode_t mode;
+ } create;
- /* post-op hook */
- } transaction;
+ struct {
+ dict_t *params;
+ dev_t dev;
+ mode_t mode;
+ } mknod;
- syncbarrier_t barrier;
+ struct {
+ dict_t *params;
+ int32_t mode;
+ } mkdir;
- /* extra data for fops */
- dict_t *xdata_req;
- dict_t *xdata_rsp;
+ struct {
+ dict_t *params;
+ char *linkpath;
+ } symlink;
- dict_t *xattr_rsp; /*for [f]xattrop*/
+ struct {
+ off_t offset;
+ size_t len;
+ int32_t mode;
+ } fallocate;
- mode_t umask;
- int xflag;
- gf_boolean_t do_discovery;
- struct afr_reply *replies;
+ struct {
+ off_t offset;
+ size_t len;
+ } discard;
- /* For client side background heals. */
- struct list_head healer;
- call_frame_t *heal_frame;
-} afr_local_t;
+ struct {
+ off_t offset;
+ off_t len;
+ struct iatt prebuf;
+ struct iatt postbuf;
+ } zerofill;
+ struct {
+ char *volume;
+ int32_t cmd;
+ int32_t in_cmd;
+ struct gf_flock in_flock;
+ struct gf_flock flock;
+ void *xdata;
+ } inodelk;
-typedef struct _afr_inode_ctx {
- uint64_t read_subvol;
- int spb_choice;
- gf_timer_t *timer;
-} afr_inode_ctx_t;
+ struct {
+ char *volume;
+ char *basename;
+ void *xdata;
+ entrylk_cmd in_cmd;
+ entrylk_cmd cmd;
+ entrylk_type type;
+ } entrylk;
+
+ struct {
+ off_t offset;
+ gf_seek_what_t what;
+ } seek;
+
+ struct {
+ struct gf_lease user_lease;
+ struct gf_lease ret_lease;
+ unsigned char *locked_nodes;
+ } lease;
+
+ struct {
+ int flags;
+ } rmdir;
+
+ struct {
+ int32_t datasync;
+ } fsync;
+
+ struct {
+ uuid_t gfid_req;
+ gf_boolean_t needs_fresh_lookup;
+ } lookup;
+
+ } cont;
+
+ struct {
+ char *basename;
+ char *new_basename;
+
+ loc_t parent_loc;
+ loc_t new_parent_loc;
+
+ /* stub to resume on destruction
+ of the transaction frame */
+ call_stub_t *resume_stub;
+
+ struct list_head owner_list;
+ struct list_head wait_list;
+
+ unsigned char *pre_op;
+
+ /* Changelog xattr dict for [f]xattrop*/
+ dict_t **changelog_xdata;
+ unsigned char *pre_op_sources;
+
+ /* @failed_subvols: subvolumes on which a pre-op or a
+ FOP failed. */
+ unsigned char *failed_subvols;
+
+ call_frame_t *main_frame; /*Fop frame*/
+ call_frame_t *frame; /*Transaction frame*/
+
+ int (*wind)(call_frame_t *frame, xlator_t *this, int subvol);
+
+ int (*unwind)(call_frame_t *frame, xlator_t *this);
+
+ off_t start, len;
+
+ afr_transaction_type type;
+
+ int32_t in_flight_sb_errno; /* This is where the cause of the
+ failure on the last good copy of
+ the file is stored.
+ */
+
+ /* @changelog_resume: function to be called after changlogging
+ (either pre-op or post-op) is done
+ */
+ afr_changelog_resume_t changelog_resume;
+
+ gf_boolean_t eager_lock_on;
+ gf_boolean_t do_eager_unlock;
+
+ /* @dirtied: flag which indicates whether we set dirty flag
+ in the OP. Typically true when we are performing operation
+ on more than one subvol and optimistic changelog is disabled
+
+ A 'true' value set in @dirtied flag means an 'undirtying'
+ has to be done in POST-OP phase.
+ */
+ gf_boolean_t dirtied;
+
+ /* @inherited: flag which indicates that the dirty flags
+ of the previous transaction were inherited
+ */
+ gf_boolean_t inherited;
+
+ /*
+ @no_uninherit: flag which indicates that a pre_op_uninherit()
+ must _not_ be attempted (and returned as failure) always. This
+ flag is set when a hard pre-op is performed, but not accounted
+ for it in fd_ctx->on_disk[]. Such transactions are "isolated"
+ from the pre-op piggybacking entirely and therefore uninherit
+ must not be attempted.
+ */
+ gf_boolean_t no_uninherit;
+
+ gf_boolean_t in_flight_sb; /* Indicator for occurrence of
+ split-brain while in the middle of
+ a txn. */
+
+ /* @uninherit_done:
+ @uninherit_value:
+
+ The above pair variables make pre_op_uninherit() idempotent.
+ Both are FALSE initially. The first call to pre_op_uninherit
+ sets @uninherit_done to TRUE and the return value to
+ @uninherit_value. Further calls will check for @uninherit_done
+ to be TRUE and if so will simply return @uninherit_value.
+ */
+ gf_boolean_t uninherit_done;
+ gf_boolean_t uninherit_value;
+
+ gf_boolean_t disable_delayed_post_op;
+ } transaction;
+
+ syncbarrier_t barrier;
+
+ /* extra data for fops */
+ dict_t *xdata_req;
+ dict_t *xdata_rsp;
+
+ dict_t *xattr_rsp; /*for [f]xattrop*/
+
+ mode_t umask;
+ int xflag;
+ struct afr_reply *replies;
+
+ /* For client side background heals. */
+ struct list_head healer;
+ call_frame_t *heal_frame;
+
+ afr_inode_ctx_t *inode_ctx;
+
+ /*For thin-arbiter transactions.*/
+ int ta_failed_subvol;
+ int ta_event_gen;
+ struct list_head ta_waitq;
+ struct list_head ta_onwireq;
+ afr_ta_fop_state_t fop_state;
+ afr_fop_lock_state_t fop_lock_state;
+ gf_lkowner_t saved_lk_owner;
+ unsigned char read_txn_query_child;
+ unsigned char ta_child_up;
+ gf_boolean_t do_discovery;
+ gf_boolean_t need_full_crawl;
+ gf_boolean_t is_read_txn;
+ gf_boolean_t is_new_entry;
+} afr_local_t;
typedef struct afr_spbc_timeout {
- call_frame_t *frame;
- gf_boolean_t d_spb;
- gf_boolean_t m_spb;
- loc_t *loc;
- int spb_child_index;
+ call_frame_t *frame;
+ loc_t *loc;
+ int spb_child_index;
+ gf_boolean_t d_spb;
+ gf_boolean_t m_spb;
} afr_spbc_timeout_t;
typedef struct afr_spb_status {
- call_frame_t *frame;
- loc_t *loc;
+ call_frame_t *frame;
+ loc_t *loc;
} afr_spb_status_t;
typedef struct afr_empty_brick_args {
- call_frame_t *frame;
- loc_t loc;
- int empty_index;
- char *op_type;
+ call_frame_t *frame;
+ char *op_type;
+ loc_t loc;
+ int empty_index;
} afr_empty_brick_args_t;
typedef struct afr_read_subvol_args {
- ia_type_t ia_type;
- uuid_t gfid;
+ ia_type_t ia_type;
+ uuid_t gfid;
} afr_read_subvol_args_t;
-/* did a call fail due to a child failing? */
-#define child_went_down(op_ret, op_errno) (((op_ret) < 0) && \
- ((op_errno == ENOTCONN) || \
- (op_errno == EBADFD)))
+typedef struct afr_granular_esh_args {
+ fd_t *heal_fd;
+ xlator_t *xl;
+ call_frame_t *frame;
+ gf_boolean_t mismatch; /* flag to represent occurrence of type/gfid
+ mismatch */
+} afr_granular_esh_args_t;
int
-afr_inode_get_readable (call_frame_t *frame, inode_t *inode, xlator_t *this,
- unsigned char *readable, int *event_p, int type);
+afr_inode_get_readable(call_frame_t *frame, inode_t *inode, xlator_t *this,
+ unsigned char *readable, int *event_p, int type);
int
-afr_inode_read_subvol_get (inode_t *inode, xlator_t *this,
- unsigned char *data_subvols,
- unsigned char *metadata_subvols,
- int *event_generation);
+afr_inode_read_subvol_get(inode_t *inode, xlator_t *this,
+ unsigned char *data_subvols,
+ unsigned char *metadata_subvols,
+ int *event_generation);
int
-__afr_inode_read_subvol_get (inode_t *inode, xlator_t *this,
- unsigned char *data_subvols,
- unsigned char *metadata_subvols,
- int *event_generation);
+__afr_inode_read_subvol_get(inode_t *inode, xlator_t *this,
+ unsigned char *data_subvols,
+ unsigned char *metadata_subvols,
+ int *event_generation);
int
-__afr_inode_read_subvol_set (inode_t *inode, xlator_t *this,
- unsigned char *data_subvols,
- unsigned char *metadata_subvol,
- int event_generation);
+__afr_inode_read_subvol_set(inode_t *inode, xlator_t *this,
+ unsigned char *data_subvols,
+ unsigned char *metadata_subvol,
+ int event_generation);
int
-afr_inode_read_subvol_set (inode_t *inode, xlator_t *this,
- unsigned char *data_subvols,
- unsigned char *metadata_subvols,
- int event_generation);
+afr_inode_read_subvol_set(inode_t *inode, xlator_t *this,
+ unsigned char *data_subvols,
+ unsigned char *metadata_subvols,
+ int event_generation);
int
-afr_inode_read_subvol_reset (inode_t *inode, xlator_t *this);
+__afr_inode_need_refresh_set(inode_t *inode, xlator_t *this);
int
-afr_read_subvol_select_by_policy (inode_t *inode, xlator_t *this,
- unsigned char *readable,
- afr_read_subvol_args_t *args);
+afr_inode_need_refresh_set(inode_t *inode, xlator_t *this);
int
-afr_inode_read_subvol_type_get (inode_t *inode, xlator_t *this,
- unsigned char *readable, int *event_p,
- int type);
+afr_read_subvol_select_by_policy(inode_t *inode, xlator_t *this,
+ unsigned char *readable,
+ afr_read_subvol_args_t *args);
+
+int
+afr_inode_read_subvol_type_get(inode_t *inode, xlator_t *this,
+ unsigned char *readable, int *event_p, int type);
int
-afr_read_subvol_get (inode_t *inode, xlator_t *this, int *subvol_p,
- int *event_p, afr_transaction_type type,
- afr_read_subvol_args_t *args);
+afr_read_subvol_get(inode_t *inode, xlator_t *this, int *subvol_p,
+ unsigned char *readables, int *event_p,
+ afr_transaction_type type, afr_read_subvol_args_t *args);
-#define afr_data_subvol_get(i, t, s, e, a) \
- afr_read_subvol_get(i, t, s, e, AFR_DATA_TRANSACTION, a)
+#define afr_data_subvol_get(i, t, s, r, e, a) \
+ afr_read_subvol_get(i, t, s, r, e, AFR_DATA_TRANSACTION, a)
-#define afr_metadata_subvol_get(i, t, s, e, a) \
- afr_read_subvol_get(i, t, s, e, AFR_METADATA_TRANSACTION, a)
+#define afr_metadata_subvol_get(i, t, s, r, e, a) \
+ afr_read_subvol_get(i, t, s, r, e, AFR_METADATA_TRANSACTION, a)
int
-afr_inode_refresh (call_frame_t *frame, xlator_t *this, inode_t *inode,
- afr_inode_refresh_cbk_t cbk);
+afr_inode_refresh(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ uuid_t gfid, afr_inode_refresh_cbk_t cbk);
int32_t
-afr_notify (xlator_t *this, int32_t event, void *data, void *data2);
+afr_notify(xlator_t *this, int32_t event, void *data, void *data2);
int
-xattr_is_equal (dict_t *this, char *key1, data_t *value1, void *data);
+xattr_is_equal(dict_t *this, char *key1, data_t *value1, void *data);
int
-afr_init_entry_lockee (afr_entry_lockee_t *lockee, afr_local_t *local,
- loc_t *loc, char *basename, int child_count);
+afr_add_entry_lockee(afr_local_t *local, loc_t *loc, char *basename,
+ int child_count);
+
+int
+afr_add_inode_lockee(afr_local_t *local, int child_count);
void
-afr_entry_lockee_cleanup (afr_internal_lock_t *int_lock);
+afr_lockees_cleanup(afr_internal_lock_t *int_lock);
int
-afr_attempt_lock_recovery (xlator_t *this, int32_t child_index);
+afr_attempt_lock_recovery(xlator_t *this, int32_t child_index);
int
-afr_mark_locked_nodes (xlator_t *this, fd_t *fd,
- unsigned char *locked_nodes);
+afr_mark_locked_nodes(xlator_t *this, fd_t *fd, unsigned char *locked_nodes);
void
-afr_set_lk_owner (call_frame_t *frame, xlator_t *this, void *lk_owner);
+afr_set_lk_owner(call_frame_t *frame, xlator_t *this, void *lk_owner);
int
-afr_set_lock_number (call_frame_t *frame, xlator_t *this);
+afr_set_lock_number(call_frame_t *frame, xlator_t *this);
int32_t
-afr_unlock (call_frame_t *frame, xlator_t *this);
-
-int
-afr_nonblocking_entrylk (call_frame_t *frame, xlator_t *this);
+afr_unlock(call_frame_t *frame, xlator_t *this);
int
-afr_nonblocking_inodelk (call_frame_t *frame, xlator_t *this);
+afr_lock_nonblocking(call_frame_t *frame, xlator_t *this);
int
-afr_blocking_lock (call_frame_t *frame, xlator_t *this);
+afr_blocking_lock(call_frame_t *frame, xlator_t *this);
int
-afr_internal_lock_finish (call_frame_t *frame, xlator_t *this);
+afr_internal_lock_finish(call_frame_t *frame, xlator_t *this);
int
-afr_lk_transfer_datalock (call_frame_t *dst, call_frame_t *src, char *dom,
- unsigned int child_count);
-
-int
-__afr_fd_ctx_set (xlator_t *this, fd_t *fd);
-
-int
-afr_fd_ctx_set (xlator_t *this, fd_t *fd);
+__afr_fd_ctx_set(xlator_t *this, fd_t *fd);
afr_fd_ctx_t *
-afr_fd_ctx_get (fd_t *fd, xlator_t *this);
+afr_fd_ctx_get(fd_t *fd, xlator_t *this);
int
-afr_build_parent_loc (loc_t *parent, loc_t *child, int32_t *op_errno);
+afr_build_parent_loc(loc_t *parent, loc_t *child, int32_t *op_errno);
int
-afr_locked_nodes_count (unsigned char *locked_nodes, int child_count);
+afr_locked_nodes_count(unsigned char *locked_nodes, int child_count);
int
-afr_replies_interpret (call_frame_t *frame, xlator_t *this, inode_t *inode,
- gf_boolean_t *start_heal);
+afr_replies_interpret(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ gf_boolean_t *start_heal);
void
-afr_local_replies_wipe (afr_local_t *local, afr_private_t *priv);
+afr_local_replies_wipe(afr_local_t *local, afr_private_t *priv);
void
-afr_local_cleanup (afr_local_t *local, xlator_t *this);
+afr_local_cleanup(afr_local_t *local, xlator_t *this);
int
-afr_frame_return (call_frame_t *frame);
+afr_frame_return(call_frame_t *frame);
int
-afr_open (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
- fd_t *fd, dict_t *xdata);
+afr_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata);
void
-afr_local_transaction_cleanup (afr_local_t *local, xlator_t *this);
+afr_local_transaction_cleanup(afr_local_t *local, xlator_t *this);
int
-afr_cleanup_fd_ctx (xlator_t *this, fd_t *fd);
-
-#define AFR_STACK_UNWIND(fop, frame, params ...) \
- do { \
- afr_local_t *__local = NULL; \
- xlator_t *__this = NULL; \
- if (frame) { \
- __local = frame->local; \
- __this = frame->this; \
- frame->local = NULL; \
- } \
- STACK_UNWIND_STRICT (fop, frame, params); \
- if (__local) { \
- afr_local_cleanup (__local, __this); \
- mem_put (__local); \
- } \
- } while (0)
-
-#define AFR_STACK_DESTROY(frame) \
- do { \
- afr_local_t *__local = NULL; \
- xlator_t *__this = NULL; \
- __local = frame->local; \
- __this = frame->this; \
- frame->local = NULL; \
- STACK_DESTROY (frame->root); \
- if (__local) { \
- afr_local_cleanup (__local, __this); \
- mem_put (__local); \
- } \
- } while (0);
-
-#define AFR_FRAME_INIT(frame, op_errno) \
- ({frame->local = mem_get0 (THIS->local_pool); \
- if (afr_local_init (frame->local, THIS->private, &op_errno)) { \
- afr_local_cleanup (frame->local, THIS); \
- mem_put (frame->local); \
- frame->local = NULL; }; \
- frame->local;})
-
-#define AFR_STACK_RESET(frame) \
- do { \
- afr_local_t *__local = NULL; \
- xlator_t *__this = NULL; \
- __local = frame->local; \
- __this = frame->this; \
- frame->local = NULL; \
- int __opr; \
- STACK_RESET (frame->root); \
- if (__local) { \
- afr_local_cleanup (__local, __this); \
- mem_put (__local); \
- } \
- AFR_FRAME_INIT (frame, __opr); \
- } while (0)
+afr_cleanup_fd_ctx(xlator_t *this, fd_t *fd);
+
+#define AFR_STACK_UNWIND(fop, frame, op_ret, op_errno, params...) \
+ do { \
+ afr_local_t *__local = NULL; \
+ xlator_t *__this = NULL; \
+ int32_t __op_ret = 0; \
+ int32_t __op_errno = 0; \
+ \
+ __op_ret = op_ret; \
+ __op_errno = op_errno; \
+ if (frame) { \
+ __local = frame->local; \
+ __this = frame->this; \
+ afr_handle_inconsistent_fop(frame, &__op_ret, &__op_errno); \
+ if (__local && __local->is_read_txn) \
+ afr_pending_read_decrement(__this->private, \
+ __local->read_subvol); \
+ if (__local && __local->xdata_req && \
+ afr_is_lock_mode_mandatory(__local->xdata_req)) \
+ afr_dom_lock_release(frame); \
+ frame->local = NULL; \
+ } \
+ \
+ STACK_UNWIND_STRICT(fop, frame, __op_ret, __op_errno, params); \
+ if (__local) { \
+ afr_local_cleanup(__local, __this); \
+ mem_put(__local); \
+ } \
+ } while (0)
+
+#define AFR_STACK_DESTROY(frame) \
+ do { \
+ afr_local_t *__local = NULL; \
+ xlator_t *__this = NULL; \
+ __local = frame->local; \
+ __this = frame->this; \
+ frame->local = NULL; \
+ STACK_DESTROY(frame->root); \
+ if (__local) { \
+ afr_local_cleanup(__local, __this); \
+ mem_put(__local); \
+ } \
+ } while (0);
+
+#define AFR_FRAME_INIT(frame, op_errno) \
+ ({ \
+ frame->local = mem_get0(THIS->local_pool); \
+ if (afr_local_init(frame->local, frame->this->private, &op_errno)) { \
+ afr_local_cleanup(frame->local, frame->this); \
+ mem_put(frame->local); \
+ frame->local = NULL; \
+ }; \
+ frame->local; \
+ })
+
+#define AFR_STACK_RESET(frame) \
+ do { \
+ afr_local_t *__local = NULL; \
+ xlator_t *__this = NULL; \
+ __local = frame->local; \
+ __this = frame->this; \
+ frame->local = NULL; \
+ int __opr; \
+ STACK_RESET(frame->root); \
+ if (__local) { \
+ afr_local_cleanup(__local, __this); \
+ mem_put(__local); \
+ } \
+ AFR_FRAME_INIT(frame, __opr); \
+ } while (0)
/* allocate and return a string that is the basename of argument */
static inline char *
-AFR_BASENAME (const char *str)
+AFR_BASENAME(const char *str)
{
- char *__tmp_str = NULL;
- char *__basename_str = NULL;
- __tmp_str = gf_strdup (str);
- __basename_str = gf_strdup (basename (__tmp_str));
- GF_FREE (__tmp_str);
- return __basename_str;
+ char *__tmp_str = NULL;
+ char *__basename_str = NULL;
+ __tmp_str = gf_strdup(str);
+ __basename_str = gf_strdup(basename(__tmp_str));
+ GF_FREE(__tmp_str);
+ return __basename_str;
}
call_frame_t *
-afr_copy_frame (call_frame_t *base);
+afr_copy_frame(call_frame_t *base);
int
-afr_transaction_local_init (afr_local_t *local, xlator_t *this);
+afr_transaction_local_init(afr_local_t *local, xlator_t *this);
int32_t
-afr_marker_getxattr (call_frame_t *frame, xlator_t *this,
- loc_t *loc, const char *name,afr_local_t *local, afr_private_t *priv );
+afr_marker_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, afr_local_t *local, afr_private_t *priv);
int
-afr_local_init (afr_local_t *local, afr_private_t *priv, int32_t *op_errno);
+afr_local_init(afr_local_t *local, afr_private_t *priv, int32_t *op_errno);
int
-afr_internal_lock_init (afr_internal_lock_t *lk, size_t child_count,
- transaction_lk_type_t lk_type);
+afr_internal_lock_init(afr_internal_lock_t *lk, size_t child_count);
int
-afr_higher_errno (int32_t old_errno, int32_t new_errno);
+afr_higher_errno(int32_t old_errno, int32_t new_errno);
int
-afr_final_errno (afr_local_t *local, afr_private_t *priv);
+afr_final_errno(afr_local_t *local, afr_private_t *priv);
int
-afr_xattr_req_prepare (xlator_t *this, dict_t *xattr_req);
+afr_xattr_req_prepare(xlator_t *this, dict_t *xattr_req);
void
-afr_fix_open (fd_t *fd, xlator_t *this);
+afr_fix_open(fd_t *fd, xlator_t *this);
afr_fd_ctx_t *
-afr_fd_ctx_get (fd_t *fd, xlator_t *this);
+afr_fd_ctx_get(fd_t *fd, xlator_t *this);
void
-afr_set_low_priority (call_frame_t *frame);
+afr_set_low_priority(call_frame_t *frame);
int
-afr_child_fd_ctx_set (xlator_t *this, fd_t *fd, int32_t child,
- int flags);
+afr_child_fd_ctx_set(xlator_t *this, fd_t *fd, int32_t child, int flags);
void
-afr_matrix_cleanup (int32_t **pending, unsigned int m);
+afr_matrix_cleanup(int32_t **pending, unsigned int m);
-int32_t**
-afr_matrix_create (unsigned int m, unsigned int n);
+int32_t **
+afr_matrix_create(unsigned int m, unsigned int n);
-int**
-afr_mark_pending_changelog (afr_private_t *priv, unsigned char *pending,
- dict_t *xattr, ia_type_t iat);
+int **
+afr_mark_pending_changelog(afr_private_t *priv, unsigned char *pending,
+ dict_t *xattr, ia_type_t iat);
void
-afr_filter_xattrs (dict_t *xattr);
+afr_filter_xattrs(dict_t *xattr);
/*
* Special value indicating we should use the "auto" quorum method instead of
@@ -1053,71 +1245,179 @@ afr_filter_xattrs (dict_t *xattr);
#define AFR_QUORUM_AUTO INT_MAX
int
-afr_fd_report_unstable_write (xlator_t *this, fd_t *fd);
+afr_fd_report_unstable_write(xlator_t *this, afr_local_t *local);
+
+gf_boolean_t
+afr_fd_has_witnessed_unstable_write(xlator_t *this, inode_t *inode);
+
+void
+afr_reply_wipe(struct afr_reply *reply);
+
+void
+afr_replies_wipe(struct afr_reply *replies, int count);
+
+gf_boolean_t
+afr_xattrs_are_equal(dict_t *dict1, dict_t *dict2);
+
+gf_boolean_t
+afr_is_xattr_ignorable(char *key);
+
+int
+afr_get_heal_info(call_frame_t *frame, xlator_t *this, loc_t *loc);
+
+int
+afr_heal_splitbrain_file(call_frame_t *frame, xlator_t *this, loc_t *loc);
+
+int
+afr_get_split_brain_status(void *opaque);
+
+int
+afr_get_split_brain_status_cbk(int ret, call_frame_t *frame, void *opaque);
+
+int
+afr_inode_split_brain_choice_set(inode_t *inode, xlator_t *this,
+ int spb_choice);
+int
+afr_split_brain_read_subvol_get(inode_t *inode, xlator_t *this,
+ call_frame_t *frame, int *spb_subvol);
+int
+afr_get_child_index_from_name(xlator_t *this, char *name);
+
+int
+afr_is_split_brain(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ uuid_t gfid, gf_boolean_t *d_spb, gf_boolean_t *m_spb);
+int
+afr_spb_choice_timeout_cancel(xlator_t *this, inode_t *inode);
+
+int
+afr_set_split_brain_choice(int ret, call_frame_t *frame, void *opaque);
gf_boolean_t
-afr_fd_has_witnessed_unstable_write (xlator_t *this, fd_t *fd);
+afr_get_need_heal(xlator_t *this);
void
-afr_delayed_changelog_wake_resume (xlator_t *this, fd_t *fd, call_stub_t *stub);
+afr_set_need_heal(xlator_t *this, afr_local_t *local);
+
+int
+afr_selfheal_data_open(xlator_t *this, inode_t *inode, fd_t **fd);
+
+int
+afr_get_msg_id(char *op_type);
int
-afr_inodelk_init (afr_inodelk_t *lk, char *dom, size_t child_count);
+afr_set_in_flight_sb_status(xlator_t *this, call_frame_t *frame,
+ inode_t *inode);
+int32_t
+afr_quorum_errno(afr_private_t *priv);
+
+gf_boolean_t
+afr_is_consistent_io_possible(afr_local_t *local, afr_private_t *priv,
+ int32_t *op_errno);
void
-afr_handle_open_fd_count (call_frame_t *frame, xlator_t *this);
+afr_handle_inconsistent_fop(call_frame_t *frame, int32_t *op_ret,
+ int32_t *op_errno);
void
-afr_remove_eager_lock_stub (afr_local_t *local);
+afr_inode_write_fill(call_frame_t *frame, xlator_t *this, int child_index,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata);
+void
+afr_process_post_writev(call_frame_t *frame, xlator_t *this);
void
-afr_replies_wipe (struct afr_reply *replies, int count);
+afr_writev_unwind(call_frame_t *frame, xlator_t *this);
-gf_boolean_t
-afr_xattrs_are_equal (dict_t *dict1, dict_t *dict2);
+void
+afr_writev_copy_outvars(call_frame_t *src_frame, call_frame_t *dst_frame);
+
+void
+afr_update_uninodelk(afr_local_t *local, afr_internal_lock_t *int_lock,
+ int32_t child_index);
+afr_fd_ctx_t *
+__afr_fd_ctx_get(fd_t *fd, xlator_t *this);
gf_boolean_t
-afr_is_xattr_ignorable (char *key);
+afr_is_inode_refresh_reqd(inode_t *inode, xlator_t *this, int event_gen1,
+ int event_gen2);
int
-afr_get_heal_info (call_frame_t *frame, xlator_t *this, loc_t *loc);
+afr_serialize_xattrs_with_delimiter(call_frame_t *frame, xlator_t *this,
+ char *buf, const char *default_str,
+ int32_t *serz_len, char delimiter);
+gf_boolean_t
+afr_is_symmetric_error(call_frame_t *frame, xlator_t *this);
int
-afr_heal_splitbrain_file(call_frame_t *frame, xlator_t *this, loc_t *loc);
+__afr_inode_ctx_get(xlator_t *this, inode_t *inode, afr_inode_ctx_t **ctx);
-int
-afr_get_split_brain_status (void *opaque);
+uint64_t
+afr_write_subvol_get(call_frame_t *frame, xlator_t *this);
int
-afr_get_split_brain_status_cbk (int ret, call_frame_t *frame, void *opaque);
+afr_write_subvol_set(call_frame_t *frame, xlator_t *this);
int
-afr_inode_split_brain_choice_set (inode_t *inode, xlator_t *this,
- int spb_choice);
+afr_write_subvol_reset(call_frame_t *frame, xlator_t *this);
+
int
-afr_inode_split_brain_choice_get (inode_t *inode, xlator_t *this,
- int *spb_choice);
+afr_set_inode_local(xlator_t *this, afr_local_t *local, inode_t *inode);
+
int
-afr_get_child_index_from_name (xlator_t *this, char *name);
+afr_fill_ta_loc(xlator_t *this, loc_t *loc, gf_boolean_t is_gfid_based_fop);
int
-afr_is_split_brain (call_frame_t *frame, xlator_t *this, inode_t *inode,
- uuid_t gfid, gf_boolean_t *d_spb, gf_boolean_t *m_spb);
+afr_ta_post_op_lock(xlator_t *this, loc_t *loc);
+
int
-afr_spb_choice_timeout_cancel (xlator_t *this, inode_t *inode);
+afr_ta_post_op_unlock(xlator_t *this, loc_t *loc);
+
+gf_boolean_t
+afr_is_pending_set(xlator_t *this, dict_t *xdata, int type);
int
-afr_set_split_brain_choice (int ret, call_frame_t *frame, void *opaque);
+__afr_get_up_children_count(afr_private_t *priv);
+
+call_frame_t *
+afr_ta_frame_create(xlator_t *this);
gf_boolean_t
-afr_get_need_heal (xlator_t *this);
+afr_ta_has_quorum(afr_private_t *priv, afr_local_t *local);
void
-afr_set_need_heal (xlator_t *this, afr_local_t *local);
+afr_ta_lock_release_synctask(xlator_t *this);
-int
-afr_selfheal_data_open (xlator_t *this, inode_t *inode, fd_t **fd);
+void
+afr_ta_locked_priv_invalidate(afr_private_t *priv);
-int
-afr_get_msg_id (char *op_type);
+gf_boolean_t
+afr_lookup_has_quorum(call_frame_t *frame,
+ const unsigned int up_children_count);
+
+void
+afr_mark_new_entry_changelog(call_frame_t *frame, xlator_t *this);
+
+void
+afr_handle_replies_quorum(call_frame_t *frame, xlator_t *this);
+
+gf_boolean_t
+afr_ta_dict_contains_pending_xattr(dict_t *dict, afr_private_t *priv,
+ int child);
+
+void
+afr_selfheal_childup(xlator_t *this, afr_private_t *priv);
+
+gf_boolean_t
+afr_is_lock_mode_mandatory(dict_t *xdata);
+
+void
+afr_dom_lock_release(call_frame_t *frame);
+
+void
+afr_fill_success_replies(afr_local_t *local, afr_private_t *priv,
+ unsigned char *replies);
+
+gf_boolean_t
+afr_is_private_directory(afr_private_t *priv, uuid_t pargfid, const char *name,
+ pid_t pid);
#endif /* __AFR_H__ */
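
The frame-lifecycle macros declared in this header (AFR_FRAME_INIT, AFR_STACK_UNWIND) are meant to bracket every AFR fop: local state is allocated at entry and torn down when the call unwinds. The following is an illustrative sketch only, not part of this patch — `example_getxattr` and its error label are hypothetical names; the macro usage mirrors call sites found elsewhere in the tree (AFR_FRAME_INIT at fop entry, AFR_STACK_UNWIND with explicit op_ret/op_errno on the error path).

```
/* Minimal sketch, assuming afr.h provides the declarations shown above. */
#include "afr.h"

int32_t
example_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
                 const char *name, dict_t *xdata)
{
    afr_local_t *local = NULL;
    int32_t op_errno = ENOMEM;

    /* Allocates frame->local from the xlator's mem pool and runs
     * afr_local_init(); the expression evaluates to NULL on failure. */
    local = AFR_FRAME_INIT(frame, op_errno);
    if (!local)
        goto err;

    /* ... wind the fop to the children here ... */
    return 0;

err:
    /* Takes op_ret/op_errno explicitly and, after unwinding, releases
     * frame->local via afr_local_cleanup() and mem_put(). */
    AFR_STACK_UNWIND(getxattr, frame, -1, op_errno, NULL, NULL);
    return 0;
}
```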
diff --git a/xlators/cluster/afr/src/pump.c b/xlators/cluster/afr/src/pump.c
deleted file mode 100644
index ef299ec5855..00000000000
--- a/xlators/cluster/afr/src/pump.c
+++ /dev/null
@@ -1,2470 +0,0 @@
-/*
- Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include <unistd.h>
-#include <sys/time.h>
-#include <stdlib.h>
-#include <fnmatch.h>
-
-#include "afr-common.c"
-#include "defaults.h"
-#include "glusterfs.h"
-#include "pump.h"
-#include "afr-messages.h"
-
-
-static int
-afr_set_dict_gfid (dict_t *dict, uuid_t gfid)
-{
- int ret = 0;
- uuid_t *pgfid = NULL;
-
- GF_ASSERT (gfid);
-
- pgfid = GF_CALLOC (1, sizeof (uuid_t), gf_common_mt_char);
- if (!pgfid) {
- ret = -1;
- goto out;
- }
-
- gf_uuid_copy (*pgfid, gfid);
-
- ret = dict_set_dynptr (dict, "gfid-req", pgfid, sizeof (uuid_t));
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, -ret,
- AFR_MSG_DICT_SET_FAILED, "gfid set failed");
-
-out:
- if (ret && pgfid)
- GF_FREE (pgfid);
- return ret;
-}
-
-static int
-afr_set_root_gfid (dict_t *dict)
-{
- uuid_t gfid;
- int ret = 0;
-
- memset (gfid, 0, 16);
- gfid[15] = 1;
-
- ret = afr_set_dict_gfid (dict, gfid);
-
- return ret;
-}
-
-static int
-afr_build_child_loc (xlator_t *this, loc_t *child, loc_t *parent, char *name)
-{
- int ret = -1;
- uuid_t pargfid = {0};
-
- if (!child)
- goto out;
-
- if (!gf_uuid_is_null (parent->inode->gfid))
- gf_uuid_copy (pargfid, parent->inode->gfid);
- else if (!gf_uuid_is_null (parent->gfid))
- gf_uuid_copy (pargfid, parent->gfid);
-
- if (gf_uuid_is_null (pargfid))
- goto out;
-
- if (strcmp (parent->path, "/") == 0)
- ret = gf_asprintf ((char **)&child->path, "/%s", name);
- else
- ret = gf_asprintf ((char **)&child->path, "%s/%s", parent->path,
- name);
-
- if (-1 == ret) {
- }
-
- child->name = strrchr (child->path, '/');
- if (child->name)
- child->name++;
-
- child->parent = inode_ref (parent->inode);
- child->inode = inode_new (parent->inode->table);
- gf_uuid_copy (child->pargfid, pargfid);
-
- if (!child->inode) {
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- if ((ret == -1) && child)
- loc_wipe (child);
-
- return ret;
-}
-
-static void
-afr_build_root_loc (xlator_t *this, loc_t *loc)
-{
- afr_private_t *priv = NULL;
-
- priv = this->private;
- loc->path = gf_strdup ("/");
- loc->name = "";
- loc->inode = inode_ref (priv->root_inode);
- gf_uuid_copy (loc->gfid, loc->inode->gfid);
-}
-
-static void
-afr_update_loc_gfids (loc_t *loc, struct iatt *buf, struct iatt *postparent)
-{
- GF_ASSERT (loc);
- GF_ASSERT (buf);
-
- gf_uuid_copy (loc->gfid, buf->ia_gfid);
- if (postparent)
- gf_uuid_copy (loc->pargfid, postparent->ia_gfid);
-}
-
-static uint64_t pump_pid = 0;
-static void
-pump_fill_loc_info (loc_t *loc, struct iatt *iatt, struct iatt *parent)
-{
- afr_update_loc_gfids (loc, iatt, parent);
- gf_uuid_copy (loc->inode->gfid, iatt->ia_gfid);
-}
-
-static int
-pump_mark_start_pending (xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- pump_priv->pump_start_pending = 1;
-
- return 0;
-}
-
-static int
-is_pump_start_pending (xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- return (pump_priv->pump_start_pending);
-}
-
-static int
-pump_remove_start_pending (xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- pump_priv->pump_start_pending = 0;
-
- return 0;
-}
-
-static pump_state_t
-pump_get_state ()
-{
- xlator_t *this = NULL;
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- pump_state_t ret;
-
- this = THIS;
- priv = this->private;
- pump_priv = priv->pump_private;
-
- LOCK (&pump_priv->pump_state_lock);
- {
- ret = pump_priv->pump_state;
- }
- UNLOCK (&pump_priv->pump_state_lock);
-
- return ret;
-}
-
-int
-pump_change_state (xlator_t *this, pump_state_t state)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- pump_state_t state_old;
- pump_state_t state_new;
-
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- GF_ASSERT (pump_priv);
-
- LOCK (&pump_priv->pump_state_lock);
- {
- state_old = pump_priv->pump_state;
- state_new = state;
-
- pump_priv->pump_state = state;
-
- }
- UNLOCK (&pump_priv->pump_state_lock);
-
- gf_msg_debug (this->name, 0,
- "Pump changing state from %d to %d",
- state_old, state_new);
-
- return 0;
-}
-
-static int
-pump_set_resume_path (xlator_t *this, const char *path)
-{
- int ret = 0;
-
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- GF_ASSERT (pump_priv);
-
- LOCK (&pump_priv->resume_path_lock);
- {
- strncpy (pump_priv->resume_path, path, strlen (path) + 1);
- }
- UNLOCK (&pump_priv->resume_path_lock);
-
- return ret;
-}
-
-static int
-pump_save_path (xlator_t *this, const char *path)
-{
- afr_private_t *priv = NULL;
- pump_state_t state;
- dict_t *dict = NULL;
- loc_t loc = {0};
- int dict_ret = 0;
- int ret = -1;
-
- state = pump_get_state ();
- if (state == PUMP_STATE_RESUME)
- return 0;
-
- priv = this->private;
-
- GF_ASSERT (priv->root_inode);
-
- afr_build_root_loc (this, &loc);
-
- dict = dict_new ();
- dict_ret = dict_set_str (dict, PUMP_PATH, (char *)path);
- if (dict_ret)
- gf_msg (this->name, GF_LOG_WARNING,
- -dict_ret, AFR_MSG_DICT_SET_FAILED,
- "%s: failed to set the key %s", path, PUMP_PATH);
-
- ret = syncop_setxattr (PUMP_SOURCE_CHILD (this), &loc, dict, 0, NULL,
- NULL);
-
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_INFO, -ret, AFR_MSG_INFO_COMMON,
- "setxattr failed - could not save path=%s", path);
- } else {
- gf_msg_debug (this->name, 0,
- "setxattr succeeded - saved path=%s", path);
- }
-
- dict_unref (dict);
-
- loc_wipe (&loc);
- return 0;
-}
-
-static int
-pump_check_and_update_status (xlator_t *this)
-{
- pump_state_t state;
- int ret = -1;
-
- state = pump_get_state ();
-
- switch (state) {
-
- case PUMP_STATE_RESUME:
- case PUMP_STATE_RUNNING:
- {
- ret = 0;
- break;
- }
- case PUMP_STATE_PAUSE:
- {
- ret = -1;
- break;
- }
- case PUMP_STATE_ABORT:
- {
- pump_save_path (this, "/");
- ret = -1;
- break;
- }
- default:
- {
- gf_msg_debug (this->name, 0,
- "Unknown pump state");
- ret = -1;
- break;
- }
-
- }
-
- return ret;
-}
-
-static const char *
-pump_get_resume_path (xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- const char *resume_path = NULL;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- resume_path = pump_priv->resume_path;
-
- return resume_path;
-}
-
-static int
-pump_update_resume_state (xlator_t *this, const char *path)
-{
- pump_state_t state;
- const char *resume_path = NULL;
-
- state = pump_get_state ();
-
- if (state == PUMP_STATE_RESUME) {
- resume_path = pump_get_resume_path (this);
- if (strcmp (resume_path, "/") == 0) {
- gf_msg_debug (this->name, 0,
- "Reached the resume path (/). Proceeding to change state"
- " to running");
-
- pump_change_state (this, PUMP_STATE_RUNNING);
- } else if (strcmp (resume_path, path) == 0) {
- gf_msg_debug (this->name, 0,
- "Reached the resume path. Proceeding to change state"
- " to running");
-
- pump_change_state (this, PUMP_STATE_RUNNING);
- } else {
- gf_msg_debug (this->name, 0,
- "Not yet hit the resume path:res-path=%s,path=%s",
- resume_path, path);
- }
- }
-
- return 0;
-}
-
-static gf_boolean_t
-is_pump_traversal_allowed (xlator_t *this, const char *path)
-{
- pump_state_t state;
- const char *resume_path = NULL;
- gf_boolean_t ret = _gf_true;
-
- state = pump_get_state ();
-
- if (state == PUMP_STATE_RESUME) {
- resume_path = pump_get_resume_path (this);
- if (strstr (resume_path, path)) {
- gf_msg_debug (this->name, 0,
- "On the right path to resumption path");
- ret = _gf_true;
- } else {
- gf_msg_debug (this->name, 0,
- "Not the right path to resuming=> ignoring traverse");
- ret = _gf_false;
- }
- }
-
- return ret;
-}
-
-static int
-pump_save_file_stats (xlator_t *this, const char *path)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- LOCK (&pump_priv->resume_path_lock);
- {
- pump_priv->number_files_pumped++;
-
- strncpy (pump_priv->current_file, path,
- PATH_MAX);
- }
- UNLOCK (&pump_priv->resume_path_lock);
-
- return 0;
-}
-
-static int
-gf_pump_traverse_directory (loc_t *loc)
-{
- xlator_t *this = NULL;
- fd_t *fd = NULL;
- off_t offset = 0;
- loc_t entry_loc = {0};
- gf_dirent_t *entry = NULL;
- gf_dirent_t *tmp = NULL;
- gf_dirent_t entries;
- struct iatt iatt = {0};
- struct iatt parent = {0};
- dict_t *xattr_rsp = NULL;
- int ret = 0;
- gf_boolean_t is_directory_empty = _gf_true;
- gf_boolean_t free_entries = _gf_false;
-
- INIT_LIST_HEAD (&entries.list);
- this = THIS;
-
- GF_ASSERT (loc->inode);
-
- fd = fd_create (loc->inode, pump_pid);
- if (!fd) {
- gf_msg (this->name, GF_LOG_ERROR, 0, AFR_MSG_FD_CREATE_FAILED,
- "Failed to create fd for %s", loc->path);
- goto out;
- }
-
- ret = syncop_opendir (this, loc, fd, NULL, NULL);
- if (ret < 0) {
- gf_msg_debug (this->name, 0,
- "opendir failed on %s", loc->path);
- goto out;
- }
-
- gf_msg_trace (this->name, 0,
- "pump opendir on %s returned=%d",
- loc->path, ret);
-
- while (syncop_readdirp (this, fd, 131072, offset, &entries, NULL,
- NULL)) {
- free_entries = _gf_true;
-
- if (list_empty (&entries.list)) {
- gf_msg_trace (this->name, 0,
- "no more entries in directory");
- goto out;
- }
-
- list_for_each_entry_safe (entry, tmp, &entries.list, list) {
- gf_msg_debug (this->name, 0,
- "found readdir entry=%s", entry->d_name);
-
- offset = entry->d_off;
- if (gf_uuid_is_null (entry->d_stat.ia_gfid)) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_GFID_NULL, "%s/%s: No "
- "gfid present skipping",
- loc->path, entry->d_name);
- continue;
- }
- loc_wipe (&entry_loc);
- ret = afr_build_child_loc (this, &entry_loc, loc,
- entry->d_name);
- if (ret)
- goto out;
-
- if ((strcmp (entry->d_name, ".") == 0) ||
- (strcmp (entry->d_name, "..") == 0))
- continue;
-
- is_directory_empty = _gf_false;
- gf_msg_debug (this->name, 0,
- "lookup %s => %"PRId64,
- entry_loc.path,
- iatt.ia_ino);
-
- ret = syncop_lookup (this, &entry_loc, &iatt, &parent,
- NULL, &xattr_rsp);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- -ret, AFR_MSG_INFO_COMMON,
- "%s: lookup failed", entry_loc.path);
- continue;
- }
-
- ret = afr_selfheal_name (this, loc->gfid, entry->d_name,
- NULL);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SELF_HEAL_FAILED,
- "%s: name self-heal failed (%s/%s)",
- entry_loc.path, uuid_utoa (loc->gfid),
- entry->d_name);
- continue;
- }
-
- ret = afr_selfheal (this, iatt.ia_gfid);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_SELF_HEAL_FAILED,
- "%s: self-heal failed (%s)",
- entry_loc.path,
- uuid_utoa (iatt.ia_gfid));
- continue;
- }
-
- pump_fill_loc_info (&entry_loc, &iatt, &parent);
-
- pump_update_resume_state (this, entry_loc.path);
-
- pump_save_path (this, entry_loc.path);
- pump_save_file_stats (this, entry_loc.path);
-
- ret = pump_check_and_update_status (this);
- if (ret < 0) {
- gf_msg_debug (this->name, 0,
- "Pump beginning to exit out");
- goto out;
- }
-
- if (IA_ISDIR (iatt.ia_type)) {
- if (is_pump_traversal_allowed (this, entry_loc.path)) {
- gf_msg_trace (this->name, 0,
- "entering dir=%s",
- entry->d_name);
- gf_pump_traverse_directory (&entry_loc);
- }
- }
- }
-
- gf_dirent_free (&entries);
- free_entries = _gf_false;
- gf_msg_trace (this->name, 0, "offset incremented to %d",
- (int32_t) offset);
-
- }
-
- ret = syncop_close (fd);
- if (ret < 0)
- gf_msg_debug (this->name, 0, "closing the fd failed");
-
- if (is_directory_empty && (strcmp (loc->path, "/") == 0)) {
- pump_change_state (this, PUMP_STATE_RUNNING);
- gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_INFO_COMMON,
- "Empty source brick. Nothing to be done.");
- }
-
-out:
- if (entry_loc.path)
- loc_wipe (&entry_loc);
- if (free_entries)
- gf_dirent_free (&entries);
- return 0;
-}
-
-static int
-pump_update_resume_path (xlator_t *this)
-{
- const char *resume_path = NULL;
-
- resume_path = pump_get_resume_path (this);
-
- if (resume_path) {
- gf_msg_debug (this->name, 0,
- "Found a path to resume from: %s",
- resume_path);
-
- }else {
- gf_msg_debug (this->name, 0,
- "Did not find a path=> setting to '/'");
- pump_set_resume_path (this, "/");
- }
-
- pump_change_state (this, PUMP_STATE_RESUME);
-
- return 0;
-}
-
-static int32_t
-pump_xattr_cleaner (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- loc_t loc = {0};
- int i = 0;
- int ret = 0;
- int source = 0;
- int sink = 1;
-
- priv = this->private;
-
- afr_build_root_loc (this, &loc);
-
- ret = syncop_removexattr (priv->children[source], &loc,
- PUMP_PATH, 0, NULL);
-
- ret = syncop_removexattr (priv->children[sink], &loc,
- PUMP_SINK_COMPLETE, 0, NULL);
-
- for (i = 0; i < priv->child_count; i++) {
- ret = syncop_removexattr (priv->children[i], &loc,
- PUMP_SOURCE_COMPLETE, 0, NULL);
- if (ret) {
- gf_msg_debug (this->name, 0, "removexattr "
- "failed with %s", strerror (-ret));
- }
- }
-
- loc_wipe (&loc);
- return pump_command_reply (frame, this);
-}
-
-static int
-pump_complete_migration (xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
- dict_t *dict = NULL;
- pump_state_t state;
- loc_t loc = {0};
- int dict_ret = 0;
- int ret = -1;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- GF_ASSERT (priv->root_inode);
-
- afr_build_root_loc (this, &loc);
-
- dict = dict_new ();
-
- state = pump_get_state ();
- if (state == PUMP_STATE_RUNNING) {
- gf_msg_debug (this->name, 0,
- "Pump finished pumping");
-
- pump_priv->pump_finished = _gf_true;
-
- dict_ret = dict_set_str (dict, PUMP_SOURCE_COMPLETE, "jargon");
- if (dict_ret)
- gf_msg (this->name, GF_LOG_WARNING, -dict_ret,
- AFR_MSG_DICT_SET_FAILED,
- "%s: failed to set the key %s",
- loc.path, PUMP_SOURCE_COMPLETE);
-
- ret = syncop_setxattr (PUMP_SOURCE_CHILD (this), &loc, dict, 0,
- NULL, NULL);
- if (ret < 0) {
- gf_msg_debug (this->name, 0,
- "setxattr failed - while "
- "notifying source complete");
- }
- dict_ret = dict_set_str (dict, PUMP_SINK_COMPLETE, "jargon");
- if (dict_ret)
- gf_msg (this->name, GF_LOG_WARNING, -dict_ret,
- AFR_MSG_DICT_SET_FAILED,
- "%s: failed to set the key %s",
- loc.path, PUMP_SINK_COMPLETE);
-
- ret = syncop_setxattr (PUMP_SINK_CHILD (this), &loc, dict, 0,
- NULL, NULL);
- if (ret < 0) {
- gf_msg_debug (this->name, 0,
- "setxattr failed - while "
- "notifying sink complete");
- }
-
- pump_save_path (this, "/");
-
- } else if (state == PUMP_STATE_ABORT) {
- gf_msg_debug (this->name, 0, "Starting cleanup "
- "of pump internal xattrs");
- call_resume (pump_priv->cleaner);
- }
-
- loc_wipe (&loc);
- return 0;
-}
-
-static int
-pump_lookup_sink (loc_t *loc)
-{
- xlator_t *this = NULL;
- struct iatt iatt, parent;
- dict_t *xattr_rsp;
- dict_t *xattr_req = NULL;
- int ret = 0;
-
- this = THIS;
-
- xattr_req = dict_new ();
-
- ret = afr_set_root_gfid (xattr_req);
- if (ret)
- goto out;
-
- ret = syncop_lookup (PUMP_SINK_CHILD (this), loc, &iatt, &parent,
- xattr_req, &xattr_rsp);
-
- if (ret) {
- gf_msg_debug (this->name, 0,
- "Lookup on sink child failed");
- ret = -1;
- goto out;
- }
-
-out:
- if (xattr_req)
- dict_unref (xattr_req);
-
- return ret;
-}
-
-static int
-pump_task (void *data)
-{
- xlator_t *this = NULL;
- afr_private_t *priv = NULL;
-
-
- loc_t loc = {0};
- struct iatt iatt, parent;
- dict_t *xattr_rsp = NULL;
- dict_t *xattr_req = NULL;
-
- int ret = -1;
-
- this = THIS;
- priv = this->private;
-
- GF_ASSERT (priv->root_inode);
-
- afr_build_root_loc (this, &loc);
- xattr_req = dict_new ();
- if (!xattr_req) {
- gf_msg_debug (this->name, ENOMEM,
- "Out of memory");
- ret = -1;
- goto out;
- }
-
- afr_set_root_gfid (xattr_req);
- ret = syncop_lookup (this, &loc, &iatt, &parent,
- xattr_req, &xattr_rsp);
-
- gf_msg_trace (this->name, 0,
- "lookup: path=%s gfid=%s",
- loc.path, uuid_utoa (loc.inode->gfid));
-
- ret = pump_check_and_update_status (this);
- if (ret < 0) {
- goto out;
- }
-
- pump_update_resume_path (this);
-
- afr_set_root_gfid (xattr_req);
- ret = pump_lookup_sink (&loc);
- if (ret) {
- pump_update_resume_path (this);
- goto out;
- }
-
- gf_pump_traverse_directory (&loc);
-
- pump_complete_migration (this);
-out:
- if (xattr_req)
- dict_unref (xattr_req);
-
- loc_wipe (&loc);
- return 0;
-}
-
-
-static int
-pump_task_completion (int ret, call_frame_t *sync_frame, void *data)
-{
- xlator_t *this = NULL;
- afr_private_t *priv = NULL;
-
- this = THIS;
-
- priv = this->private;
-
- inode_unref (priv->root_inode);
- STACK_DESTROY (sync_frame->root);
-
- gf_msg_debug (this->name, 0,
- "Pump xlator exiting");
- return 0;
-}
-
-int
-pump_start (call_frame_t *pump_frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- int ret = -1;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- afr_set_lk_owner (pump_frame, this, pump_frame->root);
- pump_pid = (uint64_t) (unsigned long)pump_frame->root;
-
- ret = synctask_new (pump_priv->env, pump_task,
- pump_task_completion,
- pump_frame, NULL);
- if (ret == -1) {
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "setting pump as started lk_owner: %s %"PRIu64,
- lkowner_utoa (&pump_frame->root->lk_owner), pump_pid);
-
- priv->use_afr_in_pump = 1;
-out:
- return ret;
-}
-
-static int
-pump_start_synctask (xlator_t *this)
-{
- call_frame_t *frame = NULL;
- int ret = 0;
-
- frame = create_frame (this, this->ctx->pool);
- if (!frame) {
- ret = -1;
- goto out;
- }
-
- pump_change_state (this, PUMP_STATE_RUNNING);
-
- ret = pump_start (frame, this);
-
-out:
- return ret;
-}
-
-int32_t
-pump_cmd_start_setxattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno, dict_t *xdata)
-
-{
- call_frame_t *prev = NULL;
- afr_local_t *local = NULL;
- int ret = 0;
-
- local = frame->local;
-
- if (op_ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- AFR_MSG_INFO_COMMON,
- "Could not initiate destination "
- "brick connect");
- ret = op_ret;
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Successfully initiated destination "
- "brick connect");
-
- pump_mark_start_pending (this);
-
- /* send the PARENT_UP as pump is ready now */
- prev = cookie;
- if (prev && prev->this)
- prev->this->notify (prev->this, GF_EVENT_PARENT_UP, this);
-
-out:
- local->op_ret = ret;
- pump_command_reply (frame, this);
-
- return 0;
-}
-
-static int
-pump_initiate_sink_connect (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
- afr_private_t *priv = NULL;
- dict_t *dict = NULL;
- data_t *data = NULL;
- char *clnt_cmd = NULL;
- loc_t loc = {0};
-
- int ret = 0;
-
- priv = this->private;
- local = frame->local;
-
- GF_ASSERT (priv->root_inode);
-
- afr_build_root_loc (this, &loc);
-
- data = data_ref (dict_get (local->dict, RB_PUMP_CMD_START));
- if (!data) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- AFR_MSG_DICT_GET_FAILED,
- "Could not get destination brick value");
- goto out;
- }
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- clnt_cmd = GF_CALLOC (1, data->len+1, gf_common_mt_char);
- if (!clnt_cmd) {
- ret = -1;
- goto out;
- }
-
- memcpy (clnt_cmd, data->data, data->len);
- clnt_cmd[data->len] = '\0';
- gf_msg_debug (this->name, 0, "Got destination brick %s\n",
- clnt_cmd);
-
- ret = dict_set_dynstr (dict, CLIENT_CMD_CONNECT, clnt_cmd);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, -ret,
- AFR_MSG_DICT_SET_FAILED,
- "Could not inititiate destination brick "
- "connect");
- goto out;
- }
-
- STACK_WIND (frame,
- pump_cmd_start_setxattr_cbk,
- PUMP_SINK_CHILD(this),
- PUMP_SINK_CHILD(this)->fops->setxattr,
- &loc,
- dict,
- 0, NULL);
-
- ret = 0;
-
-out:
- if (dict)
- dict_unref (dict);
-
- if (data)
- data_unref (data);
-
- if (ret && clnt_cmd)
- GF_FREE (clnt_cmd);
-
- loc_wipe (&loc);
- return ret;
-}
-
-static int
-is_pump_aborted (xlator_t *this)
-{
- pump_state_t state;
-
- state = pump_get_state ();
-
- return ((state == PUMP_STATE_ABORT));
-}
-
-int32_t
-pump_cmd_start_getxattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- dict_t *dict, dict_t *xdata)
-{
- afr_local_t *local = NULL;
- char *path = NULL;
-
- pump_state_t state;
- int ret = 0;
- int need_unwind = 0;
- int dict_ret = -1;
-
- local = frame->local;
-
- if (op_ret < 0) {
- gf_msg_debug (this->name, 0,
- "getxattr failed - changing pump "
- "state to RUNNING with '/'");
- path = "/";
- ret = op_ret;
- } else {
- gf_msg_trace (this->name, 0,
- "getxattr succeeded");
-
- dict_ret = dict_get_str (dict, PUMP_PATH, &path);
- if (dict_ret < 0)
- path = "/";
- }
-
- state = pump_get_state ();
- if ((state == PUMP_STATE_RUNNING) ||
- (state == PUMP_STATE_RESUME)) {
- gf_msg (this->name, GF_LOG_ERROR,
- 0, AFR_MSG_PUMP_XLATOR_ERROR,
- "Pump is already started");
- ret = -1;
- goto out;
- }
-
- pump_set_resume_path (this, path);
-
- if (is_pump_aborted (this))
- /* We're re-starting pump afresh */
- ret = pump_initiate_sink_connect (frame, this);
- else {
- /* We're re-starting pump from a previous
- pause */
- gf_msg_debug (this->name, 0,
- "about to start synctask");
- ret = pump_start_synctask (this);
- need_unwind = 1;
- }
-
-out:
- if ((ret < 0) || (need_unwind == 1)) {
- local->op_ret = ret;
- pump_command_reply (frame, this);
- }
- return 0;
-}
-
-int
-pump_execute_status (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- uint64_t number_files = 0;
-
- char filename[PATH_MAX];
- char summary[PATH_MAX+256];
- char *dict_str = NULL;
-
- int32_t op_ret = 0;
- int32_t op_errno = 0;
-
- dict_t *dict = NULL;
- int ret = -1;
-
- priv = this->private;
- pump_priv = priv->pump_private;
-
- LOCK (&pump_priv->resume_path_lock);
- {
- number_files = pump_priv->number_files_pumped;
- strncpy (filename, pump_priv->current_file, PATH_MAX);
- }
- UNLOCK (&pump_priv->resume_path_lock);
-
- dict_str = GF_CALLOC (1, PATH_MAX + 256, gf_afr_mt_char);
- if (!dict_str) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto out;
- }
-
- if (pump_priv->pump_finished) {
- snprintf (summary, PATH_MAX+256,
- "no_of_files=%"PRIu64, number_files);
- } else {
- snprintf (summary, PATH_MAX+256,
- "no_of_files=%"PRIu64":current_file=%s",
- number_files, filename);
- }
- snprintf (dict_str, PATH_MAX+256, "status=%d:%s",
- (pump_priv->pump_finished)?1:0, summary);
-
- dict = dict_new ();
-
- ret = dict_set_dynstr (dict, RB_PUMP_CMD_STATUS, dict_str);
- if (ret < 0) {
- gf_msg_debug (this->name, 0,
- "dict_set_dynstr returned negative value");
- } else {
- dict_str = NULL;
- }
-
- op_ret = 0;
-
-out:
-
- AFR_STACK_UNWIND (getxattr, frame, op_ret, op_errno, dict, NULL);
-
- if (dict)
- dict_unref (dict);
-
- GF_FREE (dict_str);
-
- return 0;
-}
-
-int
-pump_execute_pause (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
-
- local = frame->local;
-
- pump_change_state (this, PUMP_STATE_PAUSE);
-
- local->op_ret = 0;
- pump_command_reply (frame, this);
-
- return 0;
-}
-
-int
-pump_execute_start (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- afr_local_t *local = NULL;
-
- int ret = 0;
- loc_t loc = {0};
-
- priv = this->private;
- local = frame->local;
-
- if (!priv->root_inode) {
- gf_msg (this->name, GF_LOG_ERROR,
- 0, AFR_MSG_PUMP_XLATOR_ERROR,
- "Pump xlator cannot be started without an initial "
- "lookup");
- ret = -1;
- goto out;
- }
-
- GF_ASSERT (priv->root_inode);
-
- afr_build_root_loc (this, &loc);
-
- STACK_WIND (frame,
- pump_cmd_start_getxattr_cbk,
- PUMP_SOURCE_CHILD(this),
- PUMP_SOURCE_CHILD(this)->fops->getxattr,
- &loc,
- PUMP_PATH, NULL);
-
- ret = 0;
-
-out:
- if (ret < 0) {
- local->op_ret = ret;
- pump_command_reply (frame, this);
- }
-
- loc_wipe (&loc);
- return 0;
-}
-
-static int
-pump_cleanup_helper (void *data) {
- call_frame_t *frame = data;
-
- pump_xattr_cleaner (frame, 0, frame->this, 0, 0, NULL);
-
- return 0;
-}
-
-static int
-pump_cleanup_done (int ret, call_frame_t *sync_frame, void *data)
-{
- STACK_DESTROY (sync_frame->root);
-
- return 0;
-}
-
-int
-pump_execute_commit (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
- afr_local_t *local = NULL;
- call_frame_t *sync_frame = NULL;
- int ret = 0;
-
- priv = this->private;
- pump_priv = priv->pump_private;
- local = frame->local;
-
- local->op_ret = 0;
- if (pump_priv->pump_finished) {
- pump_change_state (this, PUMP_STATE_COMMIT);
- sync_frame = create_frame (this, this->ctx->pool);
- ret = synctask_new (pump_priv->env, pump_cleanup_helper,
- pump_cleanup_done, sync_frame, frame);
- if (ret) {
- gf_msg_debug (this->name, 0, "Couldn't create "
- "synctask for cleaning up xattrs.");
- }
-
- } else {
- gf_msg (this->name, GF_LOG_ERROR, EINPROGRESS,
- AFR_MSG_MIGRATION_IN_PROGRESS,
- "Commit can't proceed. Migration in progress");
- local->op_ret = -1;
- local->op_errno = EINPROGRESS;
- pump_command_reply (frame, this);
- }
-
- return 0;
-}
-int
-pump_execute_abort (call_frame_t *frame, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- pump_private_t *pump_priv = NULL;
- afr_local_t *local = NULL;
- call_frame_t *sync_frame = NULL;
- int ret = 0;
-
- priv = this->private;
- pump_priv = priv->pump_private;
- local = frame->local;
-
- pump_change_state (this, PUMP_STATE_ABORT);
-
- LOCK (&pump_priv->resume_path_lock);
- {
- pump_priv->number_files_pumped = 0;
- pump_priv->current_file[0] = '\0';
- }
- UNLOCK (&pump_priv->resume_path_lock);
-
- local->op_ret = 0;
- if (pump_priv->pump_finished) {
- sync_frame = create_frame (this, this->ctx->pool);
- ret = synctask_new (pump_priv->env, pump_cleanup_helper,
- pump_cleanup_done, sync_frame, frame);
- if (ret) {
- gf_msg_debug (this->name, 0, "Couldn't create "
- "synctask for cleaning up xattrs.");
- }
-
- } else {
- pump_priv->cleaner = fop_setxattr_cbk_stub (frame,
- pump_xattr_cleaner,
- 0, 0, NULL);
- }
-
- return 0;
-}
-
-gf_boolean_t
-pump_command_status (xlator_t *this, dict_t *dict)
-{
- char *cmd = NULL;
- int dict_ret = -1;
- int ret = _gf_true;
-
- dict_ret = dict_get_str (dict, RB_PUMP_CMD_STATUS, &cmd);
- if (dict_ret < 0) {
- gf_msg_debug (this->name, 0,
- "Not a pump status command");
- ret = _gf_false;
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Hit a pump command - status");
- ret = _gf_true;
-
-out:
- return ret;
-
-}
-
-gf_boolean_t
-pump_command_pause (xlator_t *this, dict_t *dict)
-{
- char *cmd = NULL;
- int dict_ret = -1;
- int ret = _gf_true;
-
- dict_ret = dict_get_str (dict, RB_PUMP_CMD_PAUSE, &cmd);
- if (dict_ret < 0) {
- gf_msg_debug (this->name, 0,
- "Not a pump pause command");
- ret = _gf_false;
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Hit a pump command - pause");
- ret = _gf_true;
-
-out:
- return ret;
-
-}
-
-gf_boolean_t
-pump_command_commit (xlator_t *this, dict_t *dict)
-{
- char *cmd = NULL;
- int dict_ret = -1;
- int ret = _gf_true;
-
- dict_ret = dict_get_str (dict, RB_PUMP_CMD_COMMIT, &cmd);
- if (dict_ret < 0) {
- gf_msg_debug (this->name, 0,
- "Not a pump commit command");
- ret = _gf_false;
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Hit a pump command - commit");
- ret = _gf_true;
-
-out:
- return ret;
-
-}
-
-gf_boolean_t
-pump_command_abort (xlator_t *this, dict_t *dict)
-{
- char *cmd = NULL;
- int dict_ret = -1;
- int ret = _gf_true;
-
- dict_ret = dict_get_str (dict, RB_PUMP_CMD_ABORT, &cmd);
- if (dict_ret < 0) {
- gf_msg_debug (this->name, 0,
- "Not a pump abort command");
- ret = _gf_false;
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Hit a pump command - abort");
- ret = _gf_true;
-
-out:
- return ret;
-
-}
-
-gf_boolean_t
-pump_command_start (xlator_t *this, dict_t *dict)
-{
- char *cmd = NULL;
- int dict_ret = -1;
- int ret = _gf_true;
-
- dict_ret = dict_get_str (dict, RB_PUMP_CMD_START, &cmd);
- if (dict_ret < 0) {
- gf_msg_debug (this->name, 0,
- "Not a pump start command");
- ret = _gf_false;
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Hit a pump command - start");
- ret = _gf_true;
-
-out:
- return ret;
-
-}
-
-int
-pump_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
- const char *name, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- int op_errno = 0;
- int ret = 0;
-
- priv = this->private;
-
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame, default_getxattr_cbk,
- FIRST_CHILD (this),
- (FIRST_CHILD (this))->fops->getxattr,
- loc, name, xdata);
- return 0;
- }
-
- if (name) {
- if (!strncmp (name, AFR_XATTR_PREFIX,
- strlen (AFR_XATTR_PREFIX))) {
-
- op_errno = ENODATA;
- ret = -1;
- goto out;
- }
-
- if (!strcmp (name, RB_PUMP_CMD_STATUS)) {
- gf_msg_debug (this->name, 0,
- "Hit pump command - status");
- pump_execute_status (frame, this);
- goto out;
- }
- }
-
- afr_getxattr (frame, this, loc, name, xdata);
-
-out:
- if (ret < 0)
- AFR_STACK_UNWIND (getxattr, frame, -1, op_errno, NULL, NULL);
- return 0;
-}
-
-int
-pump_command_reply (call_frame_t *frame, xlator_t *this)
-{
- afr_local_t *local = NULL;
-
- local = frame->local;
-
- if (local->op_ret < 0)
- gf_msg (this->name, GF_LOG_INFO,
- 0, AFR_MSG_INFO_COMMON,
- "Command failed");
- else
- gf_msg (this->name, GF_LOG_INFO,
- 0, AFR_MSG_INFO_COMMON,
- "Command succeeded");
-
- AFR_STACK_UNWIND (setxattr,
- frame,
- local->op_ret,
- local->op_errno, NULL);
-
- return 0;
-}
-
-int
-pump_parse_command (call_frame_t *frame, xlator_t *this, dict_t *dict,
- int *op_errno_p)
-{
- afr_local_t *local = NULL;
- int ret = -1;
- int op_errno = 0;
-
- if (pump_command_start (this, dict)) {
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
- local->dict = dict_ref (dict);
- ret = pump_execute_start (frame, this);
-
- } else if (pump_command_pause (this, dict)) {
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
- local->dict = dict_ref (dict);
- ret = pump_execute_pause (frame, this);
-
- } else if (pump_command_abort (this, dict)) {
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
- local->dict = dict_ref (dict);
- ret = pump_execute_abort (frame, this);
-
- } else if (pump_command_commit (this, dict)) {
- local = AFR_FRAME_INIT (frame, op_errno);
- if (!local)
- goto out;
- local->dict = dict_ref (dict);
- ret = pump_execute_commit (frame, this);
- }
-out:
- if (op_errno_p)
- *op_errno_p = op_errno;
- return ret;
-}
-
-int
-pump_setxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
- int32_t flags, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- int ret = -1;
- int op_errno = 0;
-
- GF_IF_INTERNAL_XATTR_GOTO ("trusted.glusterfs.pump*", dict, op_errno, out);
-
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame, default_setxattr_cbk,
- FIRST_CHILD (this),
- (FIRST_CHILD (this))->fops->setxattr,
- loc, dict, flags, xdata);
- return 0;
- }
-
- ret = pump_parse_command (frame, this, dict, &op_errno);
- if (ret >= 0)
- goto out;
-
- afr_setxattr (frame, this, loc, dict, flags, xdata);
-
- ret = 0;
-out:
- if (ret < 0) {
- AFR_STACK_UNWIND (setxattr, frame, -1, op_errno, NULL);
- }
-
- return 0;
-}
-
-/* Defaults */
-static int32_t
-pump_lookup (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- dict_t *xattr_req)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_lookup_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup,
- loc,
- xattr_req);
- return 0;
- }
-
- afr_lookup (frame, this, loc, xattr_req);
- return 0;
-}
-
-
-static int32_t
-pump_truncate (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- off_t offset, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_truncate_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->truncate,
- loc,
- offset, xdata);
- return 0;
- }
-
- afr_truncate (frame, this, loc, offset, xdata);
- return 0;
-}
-
-
-static int32_t
-pump_ftruncate (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- off_t offset, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_ftruncate_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->ftruncate,
- fd,
- offset, xdata);
- return 0;
- }
-
- afr_ftruncate (frame, this, fd, offset, xdata);
- return 0;
-}
-
-
-
-
-int
-pump_mknod (call_frame_t *frame, xlator_t *this,
- loc_t *loc, mode_t mode, dev_t rdev, mode_t umask, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame, default_mknod_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mknod,
- loc, mode, rdev, umask, xdata);
- return 0;
- }
- afr_mknod (frame, this, loc, mode, rdev, umask, xdata);
- return 0;
-
-}
-
-
-
-int
-pump_mkdir (call_frame_t *frame, xlator_t *this,
- loc_t *loc, mode_t mode, mode_t umask, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame, default_mkdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mkdir,
- loc, mode, umask, xdata);
- return 0;
- }
- afr_mkdir (frame, this, loc, mode, umask, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_unlink (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc, int xflag, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_unlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->unlink,
- loc, xflag, xdata);
- return 0;
- }
- afr_unlink (frame, this, loc, xflag, xdata);
- return 0;
-
-}
-
-
-static int
-pump_rmdir (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int flags, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
-
- priv = this->private;
-
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame, default_rmdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rmdir,
- loc, flags, xdata);
- return 0;
- }
-
- afr_rmdir (frame, this, loc, flags, xdata);
- return 0;
-
-}
-
-
-
-int
-pump_symlink (call_frame_t *frame, xlator_t *this,
- const char *linkpath, loc_t *loc, mode_t umask, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame, default_symlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->symlink,
- linkpath, loc, umask, xdata);
- return 0;
- }
- afr_symlink (frame, this, linkpath, loc, umask, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_rename (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_rename_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rename,
- oldloc, newloc, xdata);
- return 0;
- }
- afr_rename (frame, this, oldloc, newloc, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_link (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_link_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->link,
- oldloc, newloc, xdata);
- return 0;
- }
- afr_link (frame, this, oldloc, newloc, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_create (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int32_t flags, mode_t mode,
- mode_t umask, fd_t *fd, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame, default_create_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->create,
- loc, flags, mode, umask, fd, xdata);
- return 0;
- }
- afr_create (frame, this, loc, flags, mode, umask, fd, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_open (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t flags, fd_t *fd, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_open_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->open,
- loc, flags, fd, xdata);
- return 0;
- }
- afr_open (frame, this, loc, flags, fd, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_writev (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- struct iovec *vector,
- int32_t count,
- off_t off, uint32_t flags,
- struct iobref *iobref, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_writev_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->writev,
- fd,
- vector,
- count,
- off, flags,
- iobref, xdata);
- return 0;
- }
-
- afr_writev (frame, this, fd, vector, count, off, flags, iobref, xdata);
- return 0;
-}
-
-
-static int32_t
-pump_flush (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_flush_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->flush,
- fd, xdata);
- return 0;
- }
- afr_flush (frame, this, fd, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_fsync (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- int32_t flags, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_fsync_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsync,
- fd,
- flags, xdata);
- return 0;
- }
- afr_fsync (frame, this, fd, flags, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_opendir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc, fd_t *fd, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_opendir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->opendir,
- loc, fd, xdata);
- return 0;
- }
- afr_opendir (frame, this, loc, fd, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_fsyncdir (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- int32_t flags, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_fsyncdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsyncdir,
- fd,
- flags, xdata);
- return 0;
- }
- afr_fsyncdir (frame, this, fd, flags, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_xattrop (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- gf_xattrop_flags_t flags,
- dict_t *dict, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_xattrop_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop,
- loc,
- flags,
- dict, xdata);
- return 0;
- }
- afr_xattrop (frame, this, loc, flags, dict, xdata);
- return 0;
-
-}
-
-static int32_t
-pump_fxattrop (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- gf_xattrop_flags_t flags,
- dict_t *dict, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_fxattrop_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fxattrop,
- fd,
- flags,
- dict, xdata);
- return 0;
- }
- afr_fxattrop (frame, this, fd, flags, dict, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_removexattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- const char *name, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- int op_errno = -1;
-
- VALIDATE_OR_GOTO (this, out);
-
- GF_IF_NATIVE_XATTR_GOTO ("trusted.glusterfs.pump*",
- name, op_errno, out);
-
- op_errno = 0;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_removexattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->removexattr,
- loc,
- name, xdata);
- return 0;
- }
- afr_removexattr (frame, this, loc, name, xdata);
-
- out:
- if (op_errno)
- AFR_STACK_UNWIND (removexattr, frame, -1, op_errno, NULL);
- return 0;
-
-}
-
-
-
-static int32_t
-pump_readdir (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- size_t size,
- off_t off, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_readdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readdir,
- fd, size, off, xdata);
- return 0;
- }
- afr_readdir (frame, this, fd, size, off, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_readdirp (call_frame_t *frame, xlator_t *this, fd_t *fd,
- size_t size, off_t off, dict_t *dict)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_readdirp_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readdirp,
- fd, size, off, dict);
- return 0;
- }
- afr_readdirp (frame, this, fd, size, off, dict);
- return 0;
-
-}
-
-
-
-static int32_t
-pump_releasedir (xlator_t *this,
- fd_t *fd)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (priv->use_afr_in_pump)
- afr_releasedir (this, fd);
- return 0;
-
-}
-
-static int32_t
-pump_release (xlator_t *this,
- fd_t *fd)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (priv->use_afr_in_pump)
- afr_release (this, fd);
- return 0;
-
-}
-
-static int32_t
-pump_forget (xlator_t *this, inode_t *inode)
-{
- afr_private_t *priv = NULL;
-
- priv = this->private;
- if (priv->use_afr_in_pump)
- afr_forget (this, inode);
-
- return 0;
-}
-
-static int32_t
-pump_setattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- struct iatt *stbuf,
- int32_t valid, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_setattr_cbk,
- FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->setattr,
- loc, stbuf, valid, xdata);
- return 0;
- }
- afr_setattr (frame, this, loc, stbuf, valid, xdata);
- return 0;
-
-}
-
-
-static int32_t
-pump_fsetattr (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- struct iatt *stbuf,
- int32_t valid, dict_t *xdata)
-{
- afr_private_t *priv = NULL;
- priv = this->private;
- if (!priv->use_afr_in_pump) {
- STACK_WIND (frame,
- default_fsetattr_cbk,
- FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->fsetattr,
- fd, stbuf, valid, xdata);
- return 0;
- }
- afr_fsetattr (frame, this, fd, stbuf, valid, xdata);
- return 0;
-
-}
-
-
-/* End of defaults */
-
-
-int32_t
-mem_acct_init (xlator_t *this)
-{
- int ret = -1;
-
- if (!this)
- return ret;
-
- ret = xlator_mem_acct_init (this, gf_afr_mt_end + 1);
-
- if (ret != 0) {
- return ret;
- }
-
- return ret;
-}
-
-static int
-is_xlator_pump_sink (xlator_t *child)
-{
- return (child == PUMP_SINK_CHILD(THIS));
-}
-
-static int
-is_xlator_pump_source (xlator_t *child)
-{
- return (child == PUMP_SOURCE_CHILD(THIS));
-}
-
-int32_t
-notify (xlator_t *this, int32_t event,
- void *data, ...)
-{
- int ret = -1;
- xlator_t *child_xl = NULL;
-
- child_xl = (xlator_t *) data;
-
- ret = afr_notify (this, event, data, NULL);
-
- switch (event) {
- case GF_EVENT_CHILD_DOWN:
- if (is_xlator_pump_source (child_xl))
- pump_change_state (this, PUMP_STATE_ABORT);
- break;
-
- case GF_EVENT_CHILD_UP:
- if (is_xlator_pump_sink (child_xl))
- if (is_pump_start_pending (this)) {
- gf_msg_debug (this->name, 0,
- "about to start synctask");
- ret = pump_start_synctask (this);
- if (ret < 0)
- gf_msg_debug (this->name, 0,
- "Could not start pump "
- "synctask");
- else
- pump_remove_start_pending (this);
- }
- }
-
- return ret;
-}
-
-int32_t
-init (xlator_t *this)
-{
- afr_private_t * priv = NULL;
- pump_private_t *pump_priv = NULL;
- int child_count = 0;
- xlator_list_t * trav = NULL;
- int i = 0;
- int ret = -1;
- GF_UNUSED int op_errno = 0;
-
- int source_child = 0;
-
- if (!this->children) {
- gf_msg (this->name, GF_LOG_ERROR,
- 0, AFR_MSG_CHILD_MISCONFIGURED,
- "pump translator needs a source and sink"
- "subvolumes defined.");
- return -1;
- }
-
- if (!this->parents) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- AFR_MSG_VOL_MISCONFIGURED, "Volume is dangling.");
- }
-
- priv = GF_CALLOC (1, sizeof (afr_private_t), gf_afr_mt_afr_private_t);
- if (!priv)
- goto out;
-
- LOCK_INIT (&priv->lock);
-
- child_count = xlator_subvolume_count (this);
- if (child_count != 2) {
- gf_msg (this->name, GF_LOG_ERROR,
- 0, AFR_MSG_CHILD_MISCONFIGURED,
- "There should be exactly 2 children - one source "
- "and one sink");
- return -1;
- }
- priv->child_count = child_count;
-
- priv->read_child = source_child;
- priv->favorite_child = source_child;
- priv->background_self_heal_count = 0;
-
- priv->data_self_heal = "on";
- priv->metadata_self_heal = 1;
- priv->entry_self_heal = 1;
-
- priv->data_self_heal_window_size = 16;
-
- priv->data_change_log = 1;
- priv->metadata_change_log = 1;
- priv->entry_change_log = 1;
- priv->use_afr_in_pump = 1;
- priv->sh_readdir_size = 65536;
-
- /* Locking options */
-
- /* Lock server count infact does not matter. Locks are held
- on all subvolumes, in this case being the source
- and the sink.
- */
-
- priv->child_up = GF_CALLOC (sizeof (unsigned char), child_count,
- gf_afr_mt_char);
- if (!priv->child_up) {
- op_errno = ENOMEM;
- goto out;
- }
-
- priv->children = GF_CALLOC (sizeof (xlator_t *), child_count,
- gf_afr_mt_xlator_t);
- if (!priv->children) {
- op_errno = ENOMEM;
- goto out;
- }
-
- priv->pending_key = GF_CALLOC (sizeof (*priv->pending_key),
- child_count,
- gf_afr_mt_char);
- if (!priv->pending_key) {
- op_errno = ENOMEM;
- goto out;
- }
-
- trav = this->children;
- i = 0;
- while (i < child_count) {
- priv->children[i] = trav->xlator;
-
- ret = gf_asprintf (&priv->pending_key[i], "%s.%s",
- AFR_XATTR_PREFIX,
- trav->xlator->name);
- if (-1 == ret) {
- op_errno = ENOMEM;
- goto out;
- }
-
- trav = trav->next;
- i++;
- }
-
- ret = gf_asprintf (&priv->sh_domain, "%s-self-heal", this->name);
- if (-1 == ret) {
- op_errno = ENOMEM;
- goto out;
- }
-
- priv->root_inode = NULL;
-
- priv->last_event = GF_CALLOC (child_count, sizeof (*priv->last_event),
- gf_afr_mt_int32_t);
- if (!priv->last_event) {
- ret = -ENOMEM;
- goto out;
- }
-
- pump_priv = GF_CALLOC (1, sizeof (*pump_priv),
- gf_afr_mt_pump_priv);
- if (!pump_priv) {
- op_errno = ENOMEM;
- goto out;
- }
-
- LOCK_INIT (&pump_priv->resume_path_lock);
- LOCK_INIT (&pump_priv->pump_state_lock);
-
- pump_priv->resume_path = GF_CALLOC (1, PATH_MAX,
- gf_afr_mt_char);
- if (!pump_priv->resume_path) {
- ret = -1;
- goto out;
- }
-
- pump_priv->env = this->ctx->env;
- if (!pump_priv->env) {
- ret = -1;
- goto out;
- }
-
- /* keep more local here as we may need them for self-heal etc */
- this->local_pool = mem_pool_new (afr_local_t, 128);
- if (!this->local_pool) {
- ret = -1;
- goto out;
- }
-
- priv->pump_private = pump_priv;
- pump_priv = NULL;
-
- this->private = priv;
- priv = NULL;
-
- pump_change_state (this, PUMP_STATE_ABORT);
-
- ret = 0;
-out:
-
- if (pump_priv) {
- GF_FREE (pump_priv->resume_path);
- LOCK_DESTROY (&pump_priv->resume_path_lock);
- LOCK_DESTROY (&pump_priv->pump_state_lock);
- GF_FREE (pump_priv);
- }
-
- if (priv) {
- GF_FREE (priv->child_up);
- GF_FREE (priv->children);
- GF_FREE (priv->pending_key);
- GF_FREE (priv->last_event);
- LOCK_DESTROY (&priv->lock);
- GF_FREE (priv);
- }
-
- return ret;
-}
-
-int
-fini (xlator_t *this)
-{
- afr_private_t * priv = NULL;
- pump_private_t *pump_priv = NULL;
-
- priv = this->private;
- this->private = NULL;
- if (!priv)
- goto out;
-
- pump_priv = priv->pump_private;
- if (!pump_priv)
- goto afr_priv;
-
- GF_FREE (pump_priv->resume_path);
- LOCK_DESTROY (&pump_priv->resume_path_lock);
- LOCK_DESTROY (&pump_priv->pump_state_lock);
- GF_FREE (pump_priv);
-afr_priv:
- afr_priv_destroy (priv);
-out:
- return 0;
-}
-
-
-struct xlator_fops fops = {
- .lookup = pump_lookup,
- .open = pump_open,
- .flush = pump_flush,
- .fsync = pump_fsync,
- .fsyncdir = pump_fsyncdir,
- .xattrop = pump_xattrop,
- .fxattrop = pump_fxattrop,
- .getxattr = pump_getxattr,
-
- /* inode write */
- .writev = pump_writev,
- .truncate = pump_truncate,
- .ftruncate = pump_ftruncate,
- .setxattr = pump_setxattr,
- .setattr = pump_setattr,
- .fsetattr = pump_fsetattr,
- .removexattr = pump_removexattr,
-
- /* dir read */
- .opendir = pump_opendir,
- .readdir = pump_readdir,
- .readdirp = pump_readdirp,
-
- /* dir write */
- .create = pump_create,
- .mknod = pump_mknod,
- .mkdir = pump_mkdir,
- .unlink = pump_unlink,
- .rmdir = pump_rmdir,
- .link = pump_link,
- .symlink = pump_symlink,
- .rename = pump_rename,
-};
-
-struct xlator_dumpops dumpops = {
- .priv = afr_priv_dump,
-};
-
-
-struct xlator_cbks cbks = {
- .release = pump_release,
- .releasedir = pump_releasedir,
- .forget = pump_forget,
-};
-
-struct volume_options options[] = {
- { .key = {NULL} },
-};
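
The removed init() above follows the usual allocate-then-`goto out` cleanup style: every allocation failure jumps to a single label that releases only what was already set up, and ownership is handed off by NULL-ing the local pointer once it has been stored in a longer-lived structure. Below is a minimal, self-contained C sketch of that pattern; the `demo_*` names are hypothetical, and plain `calloc`/`free` stand in for GF_CALLOC/GF_FREE and the lock setup, so treat it as an illustration rather than the original code:

```
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the private structures allocated by the
 * removed init(); these are not the real GlusterFS types. */
struct demo_pump_priv {
        char *resume_path;
};

struct demo_afr_priv {
        unsigned char         *child_up;
        int                    child_count;
        struct demo_pump_priv *pump_private;
};

/* Allocate several resources and funnel every failure through one
 * "out:" label that frees only what was successfully allocated. */
static int
demo_init (struct demo_afr_priv **out_priv, int child_count)
{
        int                    ret = -1;
        struct demo_pump_priv *pump_priv = NULL;
        struct demo_afr_priv  *priv = calloc (1, sizeof (*priv));

        if (!priv)
                goto out;

        priv->child_count = child_count;

        priv->child_up = calloc (child_count, sizeof (*priv->child_up));
        if (!priv->child_up)
                goto out;

        pump_priv = calloc (1, sizeof (*pump_priv));
        if (!pump_priv)
                goto out;

        pump_priv->resume_path = calloc (1, 4096); /* stand-in for PATH_MAX */
        if (!pump_priv->resume_path)
                goto out;

        priv->pump_private = pump_priv;
        pump_priv = NULL;          /* ownership moved into priv */

        *out_priv = priv;
        priv = NULL;               /* ownership moved to the caller */
        ret = 0;

out:
        if (pump_priv) {
                free (pump_priv->resume_path);
                free (pump_priv);
        }
        if (priv) {
                free (priv->child_up);
                free (priv);
        }
        return ret;
}

int
main (void)
{
        struct demo_afr_priv *priv = NULL;

        if (demo_init (&priv, 2) == 0) {
                printf ("init succeeded for %d children\n", priv->child_count);
                /* Teardown mirrors the removed fini() above. */
                free (priv->pump_private->resume_path);
                free (priv->pump_private);
                free (priv->child_up);
                free (priv);
        }
        return 0;
}
```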
diff --git a/xlators/cluster/afr/src/pump.h b/xlators/cluster/afr/src/pump.h
deleted file mode 100644
index 7d5acd02bf6..00000000000
--- a/xlators/cluster/afr/src/pump.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#ifndef __PUMP_H__
-#define __PUMP_H__
-
-#include "syncop.h"
-
-/* FIXME: Needs to be defined in a common file */
-#define CLIENT_CMD_CONNECT "trusted.glusterfs.client-connect"
-#define CLIENT_CMD_DISCONNECT "trusted.glusterfs.client-disconnect"
-
-#define PUMP_SOURCE_COMPLETE "trusted.glusterfs.pump-source-complete"
-#define PUMP_SINK_COMPLETE "trusted.glusterfs.pump-sink-complete"
-
-#define PUMP_PATH "trusted.glusterfs.pump-path"
-
-#define PUMP_SOURCE_CHILD(xl) (xl->children->xlator)
-#define PUMP_SINK_CHILD(xl) (xl->children->next->xlator)
-
-typedef enum {
- PUMP_STATE_RUNNING, /* Pump is running and migrating files */
- PUMP_STATE_RESUME, /* Pump is resuming from a previous pause */
- PUMP_STATE_PAUSE, /* Pump is paused */
- PUMP_STATE_ABORT, /* Pump is aborted */
- PUMP_STATE_COMMIT, /* Pump is committed */
-} pump_state_t;
-
-typedef struct _pump_private {
- struct syncenv *env; /* The env pointer to the pump synctask */
- char *resume_path; /* path to resume from the last pause */
- gf_lock_t resume_path_lock; /* Synchronize resume_path changes */
- gf_lock_t pump_state_lock; /* Synchronize pump_state changes */
- pump_state_t pump_state; /* State of pump */
- char current_file[PATH_MAX]; /* Current file being pumped */
- uint64_t number_files_pumped; /* Number of files pumped */
- gf_boolean_t pump_finished; /* Boolean to indicate pump termination */
- char pump_start_pending; /* Boolean to mark start pending until
- CHILD_UP */
- call_stub_t *cleaner;
-} pump_private_t;
-
-void
-build_root_loc (inode_t *inode, loc_t *loc);
-int pump_start (call_frame_t *frame, xlator_t *this);
-
-gf_boolean_t
-pump_command_start (xlator_t *this, dict_t *dict);
-
-int
-pump_execute_start (call_frame_t *frame, xlator_t *this);
-
-gf_boolean_t
-pump_command_pause (xlator_t *this, dict_t *dict);
-
-int
-pump_execute_pause (call_frame_t *frame, xlator_t *this);
-
-gf_boolean_t
-pump_command_abort (xlator_t *this, dict_t *dict);
-
-int
-pump_execute_abort (call_frame_t *frame, xlator_t *this);
-
-gf_boolean_t
-pump_command_status (xlator_t *this, dict_t *dict);
-
-int
-pump_execute_status (call_frame_t *frame, xlator_t *this);
-
-int
-pump_command_reply (call_frame_t *frame, xlator_t *this);
-
-#endif /* __PUMP_H__ */
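
In the removed header, pump_private_t keeps the pump_state field next to a dedicated pump_state_lock, so every transition between PUMP_STATE_RUNNING, PAUSE, RESUME, ABORT and COMMIT happens while that lock is held. The sketch below shows the same guard-the-state-with-a-lock pattern using POSIX threads in place of gf_lock_t; the pump_demo_t type and the pump_demo_* helpers are illustrative names, not GlusterFS APIs, and the code is a hedged example rather than the original implementation:

```
#include <pthread.h>
#include <stdio.h>

/* Mirrors pump_state_t from the removed header. */
typedef enum {
        PUMP_STATE_RUNNING,
        PUMP_STATE_RESUME,
        PUMP_STATE_PAUSE,
        PUMP_STATE_ABORT,
        PUMP_STATE_COMMIT,
} pump_state_t;

typedef struct {
        pthread_mutex_t state_lock; /* stands in for pump_state_lock */
        pump_state_t    state;
} pump_demo_t;

/* Change the state only while holding the lock, the way the real code
 * guards pump_state with pump_state_lock. */
static void
pump_demo_set_state (pump_demo_t *pump, pump_state_t next)
{
        pthread_mutex_lock (&pump->state_lock);
        pump->state = next;
        pthread_mutex_unlock (&pump->state_lock);
}

static pump_state_t
pump_demo_get_state (pump_demo_t *pump)
{
        pthread_mutex_lock (&pump->state_lock);
        pump_state_t cur = pump->state;
        pthread_mutex_unlock (&pump->state_lock);
        return cur;
}

int
main (void)
{
        pump_demo_t pump = { .state = PUMP_STATE_RUNNING };

        pthread_mutex_init (&pump.state_lock, NULL);

        pump_demo_set_state (&pump, PUMP_STATE_PAUSE);
        pump_demo_set_state (&pump, PUMP_STATE_RESUME);
        printf ("final state: %d\n", pump_demo_get_state (&pump));

        pthread_mutex_destroy (&pump.state_lock);
        return 0;
}
```

Compile with `cc -pthread`; the point of the pattern is simply that readers and writers of the state field never touch it without taking the same lock.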
diff --git a/xlators/cluster/dht/src/Makefile.am b/xlators/cluster/dht/src/Makefile.am
index 29be5ce4776..56f1f2ad7c8 100644
--- a/xlators/cluster/dht/src/Makefile.am
+++ b/xlators/cluster/dht/src/Makefile.am
@@ -1,7 +1,4 @@
xlator_LTLIBRARIES = dht.la nufa.la switch.la
-if BUILD_GFDB
- xlator_LTLIBRARIES += tier.la
-endif
AM_CFLAGS = -Wall $(GF_CFLAGS)
@@ -10,40 +7,34 @@ xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/cluster
dht_common_source = dht-layout.c dht-helper.c dht-linkfile.c dht-rebalance.c \
dht-selfheal.c dht-rename.c dht-hashfn.c dht-diskusage.c \
dht-common.c dht-inode-write.c dht-inode-read.c dht-shared.c \
- $(top_builddir)/xlators/lib/src/libxlator.c
+ dht-lock.c $(top_builddir)/xlators/lib/src/libxlator.c
dht_la_SOURCES = $(dht_common_source) dht.c
nufa_la_SOURCES = $(dht_common_source) nufa.c
switch_la_SOURCES = $(dht_common_source) switch.c
-tier_la_SOURCES = $(dht_common_source) tier.c tier-common.c
-dht_la_LDFLAGS = -module -avoid-version -export-symbols $(top_srcdir)/xlators/cluster/dht/src/dht.sym
+dht_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
dht_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-nufa_la_LDFLAGS = -module -avoid-version -export-symbols $(top_srcdir)/xlators/cluster/dht/src/nufa.sym
+nufa_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
nufa_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-switch_la_LDFLAGS = -module -avoid-version -export-symbols $(top_srcdir)/xlators/cluster/dht/src/switch.sym
+switch_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
switch_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-tier_la_LDFLAGS = -module -avoid-version -export-symbols $(top_srcdir)/xlators/cluster/dht/src/tier.sym
-tier_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-
-noinst_HEADERS = dht-common.h dht-mem-types.h dht-messages.h dht-helper.h tier-common.h tier.h\
- $(top_builddir)/xlators/lib/src/libxlator.h
+noinst_HEADERS = dht-common.h dht-mem-types.h dht-messages.h \
+ dht-lock.h $(top_builddir)/xlators/lib/src/libxlator.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
- -I$(top_srcdir)/libglusterfs/src/gfdb \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/rpc/rpc-lib/src \
-I$(top_srcdir)/xlators/lib/src \
-DDATADIR=\"$(localstatedir)\" \
- -DLIBDIR=\"$(libdir)\" \
- -DLIBGFDB_VERSION=\"$(LIBGFDB_VERSION)\"
+ -DLIBDIR=\"$(libdir)\"
CLEANFILES =
-EXTRA_DIST = dht.sym nufa.sym switch.sym tier.sym
-
uninstall-local:
rm -f $(DESTDIR)$(xlatordir)/distribute.so
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index 4c93084ec82..8ba0cc4c732 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -8,183 +8,439 @@
cases as published by the Free Software Foundation.
*/
-
/* TODO: add NS locking */
-#include "glusterfs.h"
-#include "xlator.h"
#include "libxlator.h"
#include "dht-common.h"
-#include "defaults.h"
-#include "byte-order.h"
-#include "glusterfs-acl.h"
-#include "quota-common-utils.h"
+#include "dht-lock.h"
+#include <glusterfs/byte-order.h>
+#include <glusterfs/quota-common-utils.h>
+#include <glusterfs/upcall-utils.h>
+#include "glusterfs/compat-errno.h" // for ENODATA on BSD
+#include <glusterfs/common-utils.h>
#include