From 739940667f4d7f32bd676ba1bea6b1f13426ae03 Mon Sep 17 00:00:00 2001
From: Kotresh HR
Date: Wed, 16 Oct 2019 14:25:47 +0530
Subject: geo-rep: Fix config upgrade on non-participating node

After an upgrade, config files in the old format are migrated to the
new format. The monitor process performs this migration, but since the
monitor does not run on nodes that host no bricks, the config is never
migrated there. This patch fixes the config upgrade on nodes which do
not host bricks: the upgrade now also happens during geo-rep config
get/set/reset.

Change-Id: Ibade2f2310b0f3affea21a3baa1ae0eb71162cba
Signed-off-by: Kotresh HR
fixes: bz#1762220
---
 tests/00-geo-rep/gsyncd.conf.old | 47 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)
 create mode 100644 tests/00-geo-rep/gsyncd.conf.old

diff --git a/tests/00-geo-rep/gsyncd.conf.old b/tests/00-geo-rep/gsyncd.conf.old
new file mode 100644
index 00000000000..519acaf8f3e
--- /dev/null
+++ b/tests/00-geo-rep/gsyncd.conf.old
@@ -0,0 +1,47 @@
+[__meta__]
+version = 2.0
+
+[peersrx . .]
+remote_gsyncd = /usr/local/libexec/glusterfs/gsyncd
+georep_session_working_dir = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/
+ssh_command_tar = ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i /var/lib/glusterd/geo-replication/tar_ssh.pem
+changelog_log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}${local_id}-changes.log
+working_dir = /var/lib/misc/glusterfsd/${mastervol}/${eSlave}
+ignore_deletes = false
+pid_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.pid
+state_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.status
+gluster_command_dir = /usr/local/sbin/
+gluster_params = aux-gfid-mount acl
+ssh_command = ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i /var/lib/glusterd/geo-replication/secret.pem
+state_detail_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/${eSlave}-detail.status
+state_socket_unencoded = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/${eSlave}.socket
+socketdir = /var/run/gluster
+log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}.log
+gluster_log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}${local_id}.gluster.log
+special_sync_mode = partial
+change_detector = changelog
+pid-file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.pid
+state-file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.status
+
+[__section_order__]
+peersrx . . = 0
+peersrx . %5essh%3a = 2
+peersrx . = 3
+peers master slave = 4
+
+[peersrx . %5Essh%3A]
+remote_gsyncd = /nonexistent/gsyncd
+
+[peersrx .]
+gluster_command_dir = /usr/local/sbin/
+gluster_params = aux-gfid-mount acl
+log_file = /var/log/glusterfs/geo-replication-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.log
+log_file_mbr = /var/log/glusterfs/geo-replication-slaves/mbr/${session_owner}:${local_node}${local_id}.${slavevol}.log
+gluster_log_file = /var/log/glusterfs/geo-replication-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.gluster.log
+
+[peers master slave]
+session_owner = 0732cbd1-3ec5-4920-ab0d-aa5a896d5214
+master.stime_xattr_name = trusted.glusterfs.0732cbd1-3ec5-4920-ab0d-aa5a896d5214.07a9005c-ace4-4f67-b3c0-73938fb236c4.stime
+volume_id = 0732cbd1-3ec5-4920-ab0d-aa5a896d5214
+use_tarssh = true
+
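
The sketch below is a hypothetical illustration, not the actual gsyncd
implementation: it only shows the idea the commit message describes, namely
detecting an old-format gsyncd.conf (like the test file above) and rewriting
it in a flat new format when a config get/set/reset runs on a node that hosts
no bricks. The function names, the version numbers, the "[vars]" section, and
the underscore-to-dash renaming are assumptions made for the example.

    #!/usr/bin/env python3
    # Hypothetical sketch of an old-to-new geo-rep config migration.
    # Not gsyncd's real code; names and format details are assumed.

    import configparser

    def is_old_format(path):
        # Assumption: anything below [__meta__] version 4.0 is old format.
        cp = configparser.ConfigParser(interpolation=None)
        cp.read(path)
        if not cp.has_section("__meta__"):
            return True
        return float(cp.get("__meta__", "version", fallback="2.0")) < 4.0

    def upgrade(old_path, new_path, mastervol, slavevol):
        # Copy the session-specific section (e.g. "[peers master slave]")
        # into one flat "[vars]" section, renaming underscore options to
        # dash-separated names (illustrative rename only).
        old = configparser.ConfigParser(interpolation=None)
        old.read(old_path)

        new = configparser.ConfigParser(interpolation=None)
        new["__meta__"] = {"version": "4.0"}
        new["vars"] = {}

        session = "peers %s %s" % (mastervol, slavevol)
        if old.has_section(session):
            for key, value in old.items(session):
                new["vars"][key.replace("_", "-")] = value

        with open(new_path, "w") as f:
            new.write(f)

    if __name__ == "__main__":
        # Example trigger point: run the upgrade once, just before serving
        # a geo-rep config get/set/reset, so non-brick nodes also migrate.
        if is_old_format("gsyncd.conf.old"):
            upgrade("gsyncd.conf.old", "gsyncd.conf", "master", "slave")

Hooking the check into the config get/set/reset path (rather than only the
monitor) is what lets nodes that host no bricks pick up the migration, since
those nodes never start a monitor process.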