author | Joshua Eilers <jeilers@fb.com> | 2017-04-06 11:03:53 -0700 |
---|---|---|
committer | Shreyas Siravara <sshreyas@fb.com> | 2017-09-08 00:19:53 +0000 |
commit | 7f55c2e767440a22ef1e1ac225e3047d18b0f152 (patch) | |
tree | a60b707680f30cb3209215fdcce18f9da50466b4 | |
parent | 868d082bd4384aab6b1b5ede2435c10a89ab8aa2 (diff) |
[io-cache] New volume options for read sizes
Summary:
Two new volume options that control reads.
performance.io-cache.read-size
- Tells Gluster how much data it should try to read on each posix_readv call
performance.io-cache.min-cached-read-size
- Tells Gluster the minimum read size it should cache; smaller reads are not cached (see the example below)
This is a port of D4844662 to 3.8
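For context, setting the new options from the CLI looks roughly like the sketch below. This is illustrative only: "myvol" is a placeholder volume name, and the sizes simply mirror the values exercised by the regression test added in this patch.

```bash
# Illustrative sketch: "myvol" is a placeholder; the sizes mirror
# the values used in tests/basic/read-size.t below.

# Issue 512KB reads from io-cache toward the bricks on a cache miss.
gluster volume set myvol performance.io-cache.read-size 512KB

# Do not cache reads smaller than 32KB.
gluster volume set myvol performance.io-cache.min-cached-read-size 32KB

# Both values should now show up under "Options Reconfigured".
gluster volume info myvol | grep performance.io-cache
```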
Change-Id: I5ba891906f97e514e7365cc34374619379434766
Reviewed-on: https://review.gluster.org/18235
Reviewed-by: Shreyas Siravara <sshreyas@fb.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
-rw-r--r-- | tests/basic/read-size.t | 65
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-set.c | 12
-rw-r--r-- | xlators/performance/io-cache/src/io-cache.c | 29
-rw-r--r-- | xlators/performance/io-cache/src/io-cache.h | 2
4 files changed, 103 insertions, 5 deletions
```diff
diff --git a/tests/basic/read-size.t b/tests/basic/read-size.t
new file mode 100644
index 00000000000..7161a5b8c24
--- /dev/null
+++ b/tests/basic/read-size.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function clear_stats {
+        > /var/lib/glusterfs/stats/glusterfs_d_backends_${V0}*.dump
+}
+
+function got_expected_read_count {
+        expected_size=$1
+        expected_value=$2
+        grep -h aggr.read_${expected_size} /var/lib/glusterd/stats/glusterfsd__d_backends_${V0}*.dump \
+                | cut -d':' -f2 \
+                | grep "\"$expected_value\""
+        if [ $? == 0 ]; then
+                echo "Y";
+        else
+                echo "N";
+        fi
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+# These are needed for our tracking of write sizes
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 diagnostics.latency-measurement on
+TEST $CLI volume set $V0 diagnostics.count-fop-hits on
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 2
+TEST $CLI volume set $V0 performance.io-cache on
+TEST $CLI volume set $V0 performance.nfs.io-cache on
+TEST $CLI volume set $V0 performance.io-cache.read-size 512KB
+EXPECT '512KB' volinfo_field $V0 'performance.io-cache.read-size'
+TEST $CLI volume set $V0 performance.io-cache.min-cached-read-size 32KB
+EXPECT '32KB' volinfo_field $V0 'performance.io-cache.min-cached-read-size'
+
+TEST $CLI volume start $V0
+
+sleep 2;
+
+TEST mount.nfs -overs=3,noacl,nolock,noatime $HOSTNAME:/$V0 $N0
+
+# First read of big file should not be cached
+TEST dd if=/dev/zero of=$N0/100mb_file bs=1M count=100 oflag=sync
+TEST cat $N0/100mb_file
+EXPECT_WITHIN 3 "Y" got_expected_read_count "512kb" 200
+
+# The number of reads should stay the same from the previous cat since they're cached
+TEST cat $N0/100mb_file
+EXPECT_WITHIN 3 "Y" got_expected_read_count "512kb" 200
+
+# Should not be cached
+TEST dd if=/dev/zero of=$N0/10kb_file bs=1K count=10 oflag=sync
+TEST cat $N0/10kb_file
+EXPECT_WITHIN 3 "Y" got_expected_read_count "8kb" 1
+
+# The reads should increment indicating they are not being cached
+TEST cat $N0/10kb_file
+EXPECT_WITHIN 3 "Y" got_expected_read_count "8kb" 2
+
+cleanup;
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 7345dd714d3..48090c078ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1470,6 +1470,18 @@ struct volopt_map_entry glusterd_volopt_map[] = {
           .op_version = 1,
           .flags      = OPT_FLAG_CLIENT_OPT
         },
+        { .key        = "performance.io-cache.read-size",
+          .voltype    = "performance/io-cache",
+          .option     = "read-size",
+          .op_version = 1,
+          .flags      = OPT_FLAG_CLIENT_OPT
+        },
+        { .key        = "performance.io-cache.min-cached-read-size",
+          .voltype    = "performance/io-cache",
+          .option     = "min-cached-read-size",
+          .op_version = 1,
+          .flags      = OPT_FLAG_CLIENT_OPT
+        },
         { .key        = "performance.cache-priority",
           .voltype    = "performance/io-cache",
           .option     = "priority",
diff --git a/xlators/performance/io-cache/src/io-cache.c b/xlators/performance/io-cache/src/io-cache.c
index f199b229bc2..b5533481f26 100644
--- a/xlators/performance/io-cache/src/io-cache.c
+++ b/xlators/performance/io-cache/src/io-cache.c
@@ -1147,7 +1147,7 @@ ioc_readv (call_frame_t *frame, xlator_t *this, fd_t *fd,
         }
         ioc_inode_unlock (ioc_inode);
 
-        if (!fd_ctx_get (fd, this, NULL)) {
+        if (!fd_ctx_get (fd, this, NULL) || size < table->min_cached_read_size) {
                 /* disable caching for this fd, go ahead with normal readv */
                 STACK_WIND_TAIL (frame, FIRST_CHILD (frame->this),
                                  FIRST_CHILD (frame->this)->fops->readv, fd,
@@ -1753,6 +1753,12 @@ reconfigure (xlator_t *this, dict_t *options)
                         goto unlock;
                 }
 
+                GF_OPTION_RECONF ("read-size", table->read_size,
+                                  options, size_uint64, unlock);
+
+                GF_OPTION_RECONF ("min-cached-read-size", table->min_cached_read_size,
+                                  options, size_uint64, unlock);
+
                 GF_OPTION_RECONF ("cache-size", cache_size_new,
                                   options, size_uint64, unlock);
                 if (!check_cache_size_ok (this, cache_size_new)) {
@@ -1792,7 +1798,6 @@ init (xlator_t *this)
         dict_t           *xl_options = NULL;
         uint32_t          index = 0;
         int32_t           ret = -1;
-        glusterfs_ctx_t  *ctx = NULL;
         data_t           *data = 0;
         uint32_t          num_pages = 0;
 
@@ -1820,7 +1825,12 @@ init (xlator_t *this)
         }
 
         table->xl = this;
-        table->page_size = this->ctx->page_size;
+
+        GF_OPTION_INIT ("read-size", table->read_size, size_uint64, out);
+
+        GF_OPTION_INIT ("min-cached-read-size", table->min_cached_read_size, size_uint64, out);
+
+        table->page_size = table->read_size;
 
         GF_OPTION_INIT ("cache-size", table->cache_size, size_uint64, out);
 
@@ -1903,8 +1913,7 @@ init (xlator_t *this)
 
         ret = 0;
 
-        ctx = this->ctx;
-        ioc_log2_page_size = log_base2 (ctx->page_size);
+        ioc_log2_page_size = log_base2 (table->page_size);
 
         LOCK_INIT (&table->statfs_cache.lock);
         /* Invalidate statfs cache */
@@ -2257,6 +2266,16 @@ struct volume_options options[] = {
           .description = "Maximum file size which would be cached by the "
                          "io-cache translator."
         },
+        { .key  = {"read-size"},
+          .type = GF_OPTION_TYPE_SIZET,
+          .default_value = "1MB",
+          .description = "Size of each posix read"
+        },
+        { .key  = {"min-cached-read-size"},
+          .type = GF_OPTION_TYPE_SIZET,
+          .default_value = "64KB",
+          .description = "Minimum size to cache reads"
+        },
         { .key  = {"statfs-cache"},
           .type = GF_OPTION_TYPE_BOOL,
           .default_value = "0",
diff --git a/xlators/performance/io-cache/src/io-cache.h b/xlators/performance/io-cache/src/io-cache.h
index da71b2f2371..9198225791b 100644
--- a/xlators/performance/io-cache/src/io-cache.h
+++ b/xlators/performance/io-cache/src/io-cache.h
@@ -162,6 +162,8 @@ struct ioc_table {
         uint64_t          cache_used;
         uint64_t          min_file_size;
         uint64_t          max_file_size;
+        uint64_t          read_size;
+        uint64_t          min_cached_read_size;
         struct list_head  inodes; /* list of inodes cached */
         struct list_head  active;
         struct list_head *inode_lru;
```
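The regression test above verifies the behaviour by grepping the per-brick stats dumps for the read-size buckets. Outside the test harness, a rough manual check could look like the sketch below; the dump path and file-name pattern simply mirror the test (which encodes the test harness's brick path) and may differ on a real installation.

```bash
#!/bin/bash
# Rough manual check, mirroring tests/basic/read-size.t. Assumes
# diagnostics.count-fop-hits and diagnostics.stats-dump-interval are
# enabled on the volume; the path and file pattern follow the test
# and may vary per installation.
VOL=myvol
DUMPS=/var/lib/glusterd/stats/glusterfsd__d_backends_${VOL}*.dump

# With performance.io-cache.read-size set to 512KB, a cold read of a
# 100MB file should add roughly 200 reads to the 512kb bucket; a second
# read of the same file should leave the count unchanged if it was
# served from the io-cache.
grep -h "aggr.read_512kb" $DUMPS | cut -d':' -f2
```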