diff --git a/doc.ja/src/sgml/advanced.sgml b/doc.ja/src/sgml/advanced.sgml
index 6f1ef10..f61edbf 100644
--- a/doc.ja/src/sgml/advanced.sgml
+++ b/doc.ja/src/sgml/advanced.sgml
@@ -67,7 +67,7 @@
また、watchdogは、接続したすべてのPgpool-IIノードを調停し、フェイルバック、フェイルオーバ、フォローマスターコマンドがただひとつのPgpool-IIで実行されるようにします。
diff --git a/doc.ja/src/sgml/connection-pooling.sgml b/doc.ja/src/sgml/connection-pooling.sgml
index 2893d24..4a17eb1 100644
--- a/doc.ja/src/sgml/connection-pooling.sgml
+++ b/doc.ja/src/sgml/connection-pooling.sgml
@@ -1435,7 +1435,7 @@
- follow master child
+ follow primary childfollow_child
diff --git a/doc.ja/src/sgml/connection-settings.sgml b/doc.ja/src/sgml/connection-settings.sgml
index cba426e..4e9b4e3 100644
--- a/doc.ja/src/sgml/connection-settings.sgml
+++ b/doc.ja/src/sgml/connection-settings.sgml
@@ -1476,7 +1476,7 @@ S2/N2: COMMIT;
Pgpool-II> V2.2.6>, V2.3> or later
allows alllow updating the values by reloading a configuration file.
This is useful if you want to prevent any query sent to
- slaves to perform some administrative work in master/slave mode.
+ standbys to perform some administrative work in native replication mode.
-->
新しいbackend_weight>はパラメータ行を追加して、設定ファイル再読み込みすることで追加できます。
Pgpool-II> V2.2.6>、V2.3>以降では、設定ファイルの再読込みで値を変更できます。
diff --git a/doc.ja/src/sgml/example-Aurora.sgml b/doc.ja/src/sgml/example-Aurora.sgml
index cdf4de1..cab014c 100644
--- a/doc.ja/src/sgml/example-Aurora.sgml
+++ b/doc.ja/src/sgml/example-Aurora.sgml
@@ -15,14 +15,14 @@
streaming replication cluster with some exceptions. First,
fail over and online recovery are managed
by Aurora. So you don't need to
- set , ,
+ set , ,
and recovery related parameters. In this section we explain
how to set up Pgpool-II for Aurora.
-->
Amazon Aurora for PostgreSQL
Compatibility (Aurora) は、PostgreSQL用のマネージドサービスです。
ユーザから見ると、Auroraは、いくつか例外があるものの、ストリーミングレプリケーションのクラスタのように見えます。フェイルオーバやオンラインリカバリはAuroraによって管理されます。
- ですから、、、
+ ですから、、、
それにオンラインリカバリ関連のパラメータは設定の必要がありません。
この章では、Aurora用のPgpool-II設定を説明します。
diff --git a/doc.ja/src/sgml/example-cluster.sgml b/doc.ja/src/sgml/example-cluster.sgml
index 3bcd850..b3e8117 100644
--- a/doc.ja/src/sgml/example-cluster.sgml
+++ b/doc.ja/src/sgml/example-cluster.sgml
@@ -456,14 +456,14 @@
フェイルオーバの設定PostgreSQLバックエンドノードがダウンした時に実行するスクリプトをに設定します。
- また、PostgreSQLサーバが3台の場合、プライマリノードのフェイルオーバ後に新しいプライマリからスレーブをリカバリするためにも設定する必要があります。はプライマリノードのフェイルオーバ後に実行されます。PostgreSQLサーバが2台の場合、の設定は不要です。
+ また、PostgreSQLサーバが3台の場合、プライマリノードのフェイルオーバ後に新しいプライマリからスレーブをリカバリするためにも設定する必要があります。はプライマリノードのフェイルオーバ後に実行されます。PostgreSQLサーバが2台の場合、の設定は不要です。
それぞれの実行スクリプトの引数は、それぞれ実行時にPgpool-IIによってバックエンドの具体的な情報に置き換えられます。各引数の意味はをご参照ください。
failover_command = '/etc/pgpool-II/failover.sh %d %h %p %D %m %H %M %P %r %R %N %S'
- follow_master_command = '/etc/pgpool-II/follow_master.sh %d %h %p %D %m %H %M %P %r %R'
+ follow_primary_command = '/etc/pgpool-II/follow_primary.sh %d %h %p %D %m %H %M %P %r %R'
@@ -472,12 +472,12 @@
- サンプルスクリプトfailover.sh及びfollow_master.shは
+ サンプルスクリプトfailover.sh及びfollow_primary.shは
/etc/pgpool-II/配下にインストールされていますので、これらのファイルをコピーして作成します。
# cp /etc/pgpool-II/failover.sh{.sample,}
- # cp /etc/pgpool-II/follow_master.sh{.sample,}
+ # cp /etc/pgpool-II/follow_primary.sh{.sample,}
基本的にはPGHOMEを環境に合わせて変更すれば、動作します。
@@ -488,7 +488,7 @@
PGHOME=/usr/pgsql-11
...
- [server1]# vi /etc/pgpool-II/follow_master.sh
+ [server1]# vi /etc/pgpool-II/follow_primary.sh
...
PGHOME=/usr/pgsql-11
...
@@ -904,7 +904,7 @@ arping_path = '/usr/sbin'
.pcppassの設定
- 前述のfollow_master_commandのスクリプトでパスワード入力なしでPCPコマンドを実行できるように、すべてのサーバでPgpool-IIの起動ユーザのホームディレクトリに.pcppassを作成します。
+ 前述のfollow_primary_commandのスクリプトでパスワード入力なしでPCPコマンドを実行できるように、すべてのサーバでPgpool-IIの起動ユーザのホームディレクトリに.pcppassを作成します。
[全サーバ]# echo 'localhost:9898:pgpool:pgpool' > ~/.pcppass
diff --git a/doc.ja/src/sgml/failover.sgml b/doc.ja/src/sgml/failover.sgml
index 1176244..5717762 100644
--- a/doc.ja/src/sgml/failover.sgml
+++ b/doc.ja/src/sgml/failover.sgml
@@ -137,21 +137,21 @@
%M古いマスターノードのID%m新しいマスターノードのID%H新しいマスターノードのホスト名
@@ -165,14 +165,14 @@
%r新しいマスターノードのポート番号%R新しいマスターノードのデータベースクラスタパス
@@ -205,12 +205,12 @@
古いマスターノードのID%m新しいマスターノードのID%H新しいマスターノードのホスト名
@@ -383,14 +383,14 @@
%r新しいマスターノードのポート番号%R新しいマスターノードのデータベースクラスタパス
@@ -446,10 +446,10 @@
-
- follow_master_command (string)
+
+ follow_primary_command (string)
- follow_master_command 設定パラメータ
+ follow_primary_command 設定パラメータ
@@ -473,7 +473,7 @@
フォローマスターコマンドオプション
@@ -553,7 +553,7 @@
%M古いマスターノードのID
@@ -621,22 +621,22 @@
- follow_master_commandが空文字列でない場合、ストリーミングレプリケーションによるマスタースレーブでプライマリノードのフェイルオーバーが完了した後に、Pgpool-IIは新しいプライマリ以外のすべてのノードを切り離し、クライアントから再び接続を受け付ける準備のため再度新しい子プロセスを起動します。
- その後、Pgpool-IIは切り離されたそれぞれのノードに対してfollow_master_commandに設定したコマンドを実行します。
+ follow_primary_commandが空文字列でない場合、ストリーミングレプリケーションによるマスタースレーブでプライマリノードのフェイルオーバーが完了した後に、Pgpool-IIは新しいプライマリ以外のすべてのノードを切り離し、クライアントから再び接続を受け付ける準備のため再度新しい子プロセスを起動します。
+ その後、Pgpool-IIは切り離されたそれぞれのノードに対してfollow_primary_commandに設定したコマンドを実行します。
-
- 通常は、follow_master_commandコマンドはコマンドを呼んで新しいプライマリからスレーブをリカバリするために使用します。
- follow_master_command中では、pg_ctlを使ってターゲットのPostgreSQLノードが動いているかどうかを確認することをお勧めします。
+ 通常は、follow_primary_commandコマンドはコマンドを呼んで新しいプライマリからスレーブをリカバリするために使用します。
+ follow_primary_command中では、pg_ctlを使ってターゲットのPostgreSQLノードが動いているかどうかを確認することをお勧めします。
たとえば、そのノードはハードウェア障害で停止しているかも知れませんし、管理者が保守のために停止しているのかも知れません。
ノードが停止している場合は、そのノードをスキップしてください。
ノードが動いている場合は、まずそのノードを停止してからリカバリしてください。
- follow_master_commandの完全な例は、にあります。
+ follow_primary_commandの完全な例は、にあります。
Pgpool-II のノードをマスターに昇格させる
@@ -47,8 +47,8 @@ Pgpool-II documentation
pcp_promote_nodeは、起動された後以下を行います。
- が設定されている場合はそれも起動されるので、十分な注意が必要です。
- 通常はを無効にしてからこのコマンドを実行することをお勧めします。
+ が設定されている場合はそれも起動されるので、十分な注意が必要です。
+ 通常はを無効にしてからこのコマンドを実行することをお勧めします。
@@ -68,7 +68,7 @@ Pgpool-II documentation
- が設定されている場合は、2でダウンさせたPostgreSQLノードに対してを実行します。
+ が設定されている場合は、2でダウンさせたPostgreSQLノードに対してを実行します。
@@ -90,7 +90,7 @@ Pgpool-II documentation
マスターに昇格させるバックエンドノードのインデックスを指定します。
diff --git a/doc.ja/src/sgml/ref/pgpool_setup.sgml b/doc.ja/src/sgml/ref/pgpool_setup.sgml
index 47c1f02..74b80b6 100644
--- a/doc.ja/src/sgml/ref/pgpool_setup.sgml
+++ b/doc.ja/src/sgml/ref/pgpool_setup.sgml
@@ -417,7 +417,7 @@ temporarily start pgpool-II to create standby nodes
recovery node 1...pcp_recovery_node -- Command Successful
done.
-creating follow master script
+creating follow primary script
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay | replication_state | replication_sync_state | last_status_change
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------+-------------------+------------------------+---------------------
0 | /tmp | 11002 | up | 0.500000 | primary | 0 | true | 0 | | | 2020-08-18 13:50:19
diff --git a/doc.ja/src/sgml/ref/watchdog_setup.sgml b/doc.ja/src/sgml/ref/watchdog_setup.sgml
index f14dcb1..7da4bce 100644
--- a/doc.ja/src/sgml/ref/watchdog_setup.sgml
+++ b/doc.ja/src/sgml/ref/watchdog_setup.sgml
@@ -344,7 +344,7 @@ Pgpool-II documentation
recovery node 1...pcp_recovery_node -- Command Successful
done.
- creating follow master script
+ creating follow primary script
Pager usage is off.
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------
@@ -389,7 +389,7 @@ Pgpool-II documentation
recovery node 1...pcp_recovery_node -- Command Successful
done.
- creating follow master script
+ creating follow primary script
Pager usage is off.
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------
@@ -434,7 +434,7 @@ Pgpool-II documentation
recovery node 1...pcp_recovery_node -- Command Successful
done.
- creating follow master script
+ creating follow primary script
Pager usage is off.
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------
diff --git a/doc.ja/src/sgml/start.sgml b/doc.ja/src/sgml/start.sgml
index d48ac0e..c700c2a 100644
--- a/doc.ja/src/sgml/start.sgml
+++ b/doc.ja/src/sgml/start.sgml
@@ -367,23 +367,23 @@
# %h = host name
# %p = port number
# %D = database cluster path
- # %m = new master node id
- # %M = old master node id
- # %H = new master node host name
+ # %m = new main node id
+ # %M = old main node id
+ # %H = new main node host name
# %P = old primary node id
- # %R = new master database cluster path
- # %r = new master port number
+ # %R = new main database cluster path
+ # %r = new main port number
# %% = '%' character
failed_node_id=$1
failed_host_name=$2
failed_port=$3
failed_db_cluster=$4
- new_master_id=$5
- old_master_id=$6
- new_master_host_name=$7
+ new_main_id=$5
+ old_main_id=$6
+ new_main_host_name=$7
old_primary_node_id=$8
- new_master_port_number=$9
- new_master_db_cluster=${10}
+ new_main_port_number=$9
+ new_main_db_cluster=${10}
mydir=/home/t-ishii/tmp/Tutorial
log=$mydir/log/failover.log
pg_ctl=/usr/local/pgsql/bin/pg_ctl
@@ -391,10 +391,10 @@
cluster1=$mydir/data1
date >> $log
- echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_master_id $new_master_id old_master_id $old_master_id new_master_host_name $new_master_host_name old_primary_node_id $old_primary_node_id new_master_port_number $new_master_port_number new_master_db_cluster $new_master_db_cluster" >> $log
+ echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_main_id $new_main_id old_main_id $old_main_id new_main_host_name $new_main_host_name old_primary_node_id $old_primary_node_id new_main_port_number $new_main_port_number new_main_db_cluster $new_main_db_cluster" >> $log
- if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # master failed
- ! new_primary_db_cluster=${mydir}/data"$new_master_id"
+ if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # main failed
+ ! new_primary_db_cluster=${mydir}/data"$new_main_id"
echo $pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over
$pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over
sleep 2
@@ -474,7 +474,7 @@
DATADIR_BASE=/home/t-ishii/tmp/Tutorial
PGSUPERUSER=t-ishii
- master_db_cluster=$1
+ main_db_cluster=$1
recovery_node_host_name=$2
DEST_CLUSTER=$3
PORT=$4
@@ -489,13 +489,13 @@
$psql -p $PORT -c "SELECT pg_start_backup('Streaming Replication', true)" postgres
- echo "source: $master_db_cluster dest: $DEST_CLUSTER" >> $log
+ echo "source: $main_db_cluster dest: $DEST_CLUSTER" >> $log
rsync -C -a -c --delete --exclude postgresql.conf --exclude postmaster.pid \
--exclude postmaster.opts --exclude pg_log \
--exclude recovery.conf --exclude recovery.done \
--exclude pg_xlog \
- $master_db_cluster/ $DEST_CLUSTER/
+ $main_db_cluster/ $DEST_CLUSTER/
rm -fr $DEST_CLUSTER/pg_xlog
mkdir $DEST_CLUSTER/pg_xlog
diff --git a/doc.ja/src/sgml/watchdog.sgml b/doc.ja/src/sgml/watchdog.sgml
index 298fcd9..94000d5 100644
--- a/doc.ja/src/sgml/watchdog.sgml
+++ b/doc.ja/src/sgml/watchdog.sgml
@@ -750,10 +750,10 @@
隔離とフェイルオーバ操作には多くの類似点がありますが、どちらも非常に基本的な方法で異なります。
隔離操作はを実行せず、障害の発生したノードを隔離します。
diff --git a/doc/src/sgml/advanced.sgml b/doc/src/sgml/advanced.sgml
index 2f915e6..cedf5b7 100644
--- a/doc/src/sgml/advanced.sgml
+++ b/doc/src/sgml/advanced.sgml
@@ -50,7 +50,7 @@
Watchdog also coordinates with all connected Pgpool-II nodes to ensure
- that failback, failover and follow_master commands must be executed only on one pgpool-II node.
+ that failback, failover and follow_primary commands must be executed only on one pgpool-II node.
diff --git a/doc/src/sgml/connection-pooling.sgml b/doc/src/sgml/connection-pooling.sgml
index ccc99cf..e68613c 100644
--- a/doc/src/sgml/connection-pooling.sgml
+++ b/doc/src/sgml/connection-pooling.sgml
@@ -949,7 +949,7 @@
- follow master child
+ follow primary childfollow_child
diff --git a/doc/src/sgml/example-Aurora.sgml b/doc/src/sgml/example-Aurora.sgml
index 4f3d52a..d89f83e 100644
--- a/doc/src/sgml/example-Aurora.sgml
+++ b/doc/src/sgml/example-Aurora.sgml
@@ -9,7 +9,7 @@
streaming replication cluster with some exceptions. First,
fail over and online recovery are managed
by Aurora. So you don't need to
- set , ,
+ set , ,
and recovery related parameters. In this section we explain
how to set up Pgpool-II for Aurora.
diff --git a/doc/src/sgml/example-cluster.sgml b/doc/src/sgml/example-cluster.sgml
index 0fa401a..772e507 100644
--- a/doc/src/sgml/example-cluster.sgml
+++ b/doc/src/sgml/example-cluster.sgml
@@ -482,8 +482,8 @@
Specify failover.sh script to be executed after failover in failover_command
parameter.
- If we use 3 PostgreSQL servers, we need to specify follow_master_command to run after failover on the primary node failover.
- In case of two PostgreSQL servers, follow_master_command setting is not necessary.
+ If we use 3 PostgreSQL servers, we need to specify follow_primary_command to run after the primary node failover.
+ In case of two PostgreSQL servers, follow_primary_command setting is not necessary.
Pgpool-II replaces the following special characters with the backend specific
@@ -492,7 +492,7 @@
failover_command = '/etc/pgpool-II/failover.sh %d %h %p %D %m %H %M %P %r %R %N %S'
- follow_master_command = '/etc/pgpool-II/follow_master.sh %d %h %p %D %m %H %M %P %r %R'
+ follow_primary_command = '/etc/pgpool-II/follow_primary.sh %d %h %p %D %m %H %M %P %r %R'
@@ -502,12 +502,12 @@
Sample scripts failover.sh
- and follow_master.sh
+ and follow_primary.sh
are installed in /etc/pgpool-II/. Create failover scripts using these sample files.
# cp /etc/pgpool-II/failover.sh{.sample,}
- # cp /etc/pgpool-II/follow_master.sh{.sample,}
+ # cp /etc/pgpool-II/follow_primary.sh{.sample,}
Basically, it should work if you change PGHOME according to PostgreSQL installation directory.
@@ -518,7 +518,7 @@
PGHOME=/usr/pgsql-11
...
- [server1]# vi /etc/pgpool-II/follow_master.sh
+ [server1]# vi /etc/pgpool-II/follow_primary.sh
...
PGHOME=/usr/pgsql-11
...
@@ -961,7 +961,7 @@ arping_path = '/usr/sbin'
.pcppass
- Since follow_master_command script has to execute PCP command without entering the
+ Since follow_primary_command script has to execute PCP command without entering the
password, we create .pcppass in the home directory of
Pgpool-II startup user (root user).
diff --git a/doc/src/sgml/failover.sgml b/doc/src/sgml/failover.sgml
index 740c51f..004d765 100644
--- a/doc/src/sgml/failover.sgml
+++ b/doc/src/sgml/failover.sgml
@@ -152,15 +152,15 @@
%M
- Old master node ID
+ Old main node ID%m
- New master node ID
+ New main node ID%H
- Hostname of the new master node
+ Hostname of the new main node%P
@@ -168,11 +168,11 @@
%r
- Port number of the new master node
+ Port number of the new main node%R
- Database cluster directory of the new master node
+ Database cluster directory of the new main node%N
@@ -193,12 +193,12 @@
- The "master node" refers to a node which has the
+ The "main node" refers to a node which has the
"youngest (or the smallest) node id" among live the
database nodes. In streaming
replication mode, this may be different from
primary node. In ,
- %m is the new master node chosen
+ %m is the new main node chosen
by Pgpool-II. It is the node
being assigned the youngest (smallest) node id which is
alive. For example if you have 3 nodes, namely node 0, 1,
@@ -301,15 +301,15 @@
%M
- Old master node ID
+ Old main node ID%m
- New master node ID
+ New main node ID%H
- Hostname of the new master node
+ Hostname of the new main node%P
@@ -317,11 +317,11 @@
%r
- Port number of the new master node
+ Port number of the new main node%R
- Database cluster directory of the new master node
+ Database cluster directory of the new main node%N
@@ -359,10 +359,10 @@
-
- follow_master_command (string)
+
+ follow_primary_command (string)
- follow_master_command configuration parameter
+ follow_primary_command configuration parameter
@@ -373,13 +373,13 @@
command will not be executed. This command also runs if a
node promote request is issued by
command. This works only
- in Master Replication mode with streaming replication.
+ in Replication mode with streaming replication.
Since the command is executed within a child process forked
off by Pgpool-II after failover
- is completed, execution of follow master command does not
+ is completed, execution of follow primary command does not
block the service
of Pgpool-II. Here is a pseud
code to illustrate how the command is executed:
@@ -388,15 +388,15 @@
{
if (the node is not the new primary)
set down node status to shared memory status
- memorize that follow master command is needed to execute
+ memorize that follow primary command is needed to execute
}
- if (we need to executed follow master command)
+ if (we need to execute follow primary command)
fork a child process
(within the child process)
for each backend node
if (the node status in shared memory is down)
- execute follow master command
+ execute follow primary command
@@ -405,8 +405,8 @@
with the backend specific information before executing the command.
-
- follow master command options
+
+ follow primary command options
@@ -434,7 +434,7 @@
%M
- Old master node ID
+ Old main node ID%m
@@ -475,19 +475,19 @@
- If follow_master_command is not empty, then after failover
+ If follow_primary_command is not empty, then after failover
on the primary node gets completed in Native Replication mode with streaming replication,
Pgpool-II degenerates all nodes except the new primary
and starts new child processes to be ready again to accept connections from the clients.
After this, Pgpool-II executes the command configured
- in the follow_master_command for each degenerated backend nodes.
+ in the follow_primary_command for each degenerated backend nodes.
- Typically follow_master_command command
+ Typically follow_primary_command command
is used to recover the standby from the new primary by calling
the pcp_recovery_node command. In
- the follow_master_command, it is
+ the follow_primary_command, it is
recommended to check whether
target PostgreSQL node is running
or not using pg_ctl since already stopped node usually has a
@@ -495,7 +495,7 @@
problems or administrator is maintaining the node. If the
node is stopped, skip the node. If the node is running, stop
the node first and recovery it. A
- complete follow_master_command example
+ complete follow_primary_command example
can be found in .
diff --git a/doc/src/sgml/ref/pcp_promote_node.sgml b/doc/src/sgml/ref/pcp_promote_node.sgml
index de8c5ff..927e710 100644
--- a/doc/src/sgml/ref/pcp_promote_node.sgml
+++ b/doc/src/sgml/ref/pcp_promote_node.sgml
@@ -17,7 +17,7 @@ Pgpool-II documentation
pcp_promote_node
- promotes the given node as new master to Pgpool-II
+ promotes the given node as new main to Pgpool-II
@@ -38,9 +38,9 @@ Pgpool-II documentation
pcp_promote_node executes followings. Please be
- warned that if is set,
+ warned that if is set,
the command will be executed. It is a standard advice that you
- disable before executing
+ disable before executing
this command.
@@ -67,8 +67,8 @@ Pgpool-II documentation
- If is set, execute
- against
+ If is set, execute
+ against
PostgreSQL.
@@ -88,7 +88,7 @@ Pgpool-II documentation
- The index of backend node to promote as new master.
+ The index of backend node to promote as new main.
diff --git a/doc/src/sgml/ref/pgpool_setup.sgml b/doc/src/sgml/ref/pgpool_setup.sgml
index 3a0800e..c86ebec 100644
--- a/doc/src/sgml/ref/pgpool_setup.sgml
+++ b/doc/src/sgml/ref/pgpool_setup.sgml
@@ -301,7 +301,7 @@ temporarily start pgpool-II to create standby nodes
recovery node 1...pcp_recovery_node -- Command Successful
done.
-creating follow master script
+creating follow primary script
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay | replication_state | replication_sync_state | last_status_change
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------+-------------------+------------------------+---------------------
0 | /tmp | 11002 | up | 0.500000 | primary | 0 | true | 0 | | | 2020-08-18 13:50:19
diff --git a/doc/src/sgml/ref/watchdog_setup.sgml b/doc/src/sgml/ref/watchdog_setup.sgml
index 18fb13d..9b932db 100644
--- a/doc/src/sgml/ref/watchdog_setup.sgml
+++ b/doc/src/sgml/ref/watchdog_setup.sgml
@@ -249,7 +249,7 @@ Pgpool-II documentation
recovery node 1...pcp_recovery_node -- Command Successful
done.
- creating follow master script
+ creating follow primary script
Pager usage is off.
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------
@@ -294,7 +294,7 @@ Pgpool-II documentation
recovery node 1...pcp_recovery_node -- Command Successful
done.
- creating follow master script
+ creating follow primary script
Pager usage is off.
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------
@@ -339,7 +339,7 @@ Pgpool-II documentation
recovery node 1...pcp_recovery_node -- Command Successful
done.
- creating follow master script
+ creating follow primary script
Pager usage is off.
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay
---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------
diff --git a/doc/src/sgml/restrictions.sgml b/doc/src/sgml/restrictions.sgml
index b0e180a..19932f7 100644
--- a/doc/src/sgml/restrictions.sgml
+++ b/doc/src/sgml/restrictions.sgml
@@ -127,7 +127,7 @@
Creating/inserting/updating/deleting temporary tables are
always executed on the primary in native replication mode.
- SELECT on these tables is executed on master as well. However
+ SELECT on these tables is executed on primary as well. However
if the temporary table name is used as a literal in SELECT,
there's no way to detect it, and the SELECT will be load
balanced. That will trigger a "not found the table" error or
@@ -142,7 +142,7 @@
SELECT 't1'::regclass::oid;
In such that case Pgpool-II always
- sends the query to master and will not cause the problem.
+ sends the query to primary and will not cause the problem.
@@ -183,7 +183,7 @@
using CURRENT_TIMESTAMP, CURRENT_DATE,
now() as their DEFAULT values will also
be replicated correctly. This is done by replacing those
- functions by constants fetched from master at query execution
+ functions by constants fetched from primary at query execution
time. There are a few limitations however:
diff --git a/doc/src/sgml/start.sgml b/doc/src/sgml/start.sgml
index 9ca51ba..ad9ed08 100644
--- a/doc/src/sgml/start.sgml
+++ b/doc/src/sgml/start.sgml
@@ -266,23 +266,23 @@
# %h = host name
# %p = port number
# %D = database cluster path
- # %m = new master node id
- # %M = old master node id
- # %H = new master node host name
+ # %m = new main node id
+ # %M = old main node id
+ # %H = new main node host name
# %P = old primary node id
- # %R = new master database cluster path
- # %r = new master port number
+ # %R = new main database cluster path
+ # %r = new main port number
# %% = '%' character
failed_node_id=$1
failed_host_name=$2
failed_port=$3
failed_db_cluster=$4
- new_master_id=$5
- old_master_id=$6
- new_master_host_name=$7
+ new_main_id=$5
+ old_main_id=$6
+ new_main_host_name=$7
old_primary_node_id=$8
- new_master_port_number=$9
- new_master_db_cluster=${10}
+ new_main_port_number=$9
+ new_main_db_cluster=${10}
mydir=/home/t-ishii/tmp/Tutorial
log=$mydir/log/failover.log
pg_ctl=/usr/local/pgsql/bin/pg_ctl
@@ -290,10 +290,10 @@
cluster1=$mydir/data1
date >> $log
- echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_master_id $new_master_id old_master_id $old_master_id new_master_host_name $new_master_host_name old_primary_node_id $old_primary_node_id new_master_port_number $new_master_port_number new_master_db_cluster $new_master_db_cluster" >> $log
+ echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_main_id $new_main_id old_main_id $old_main_id new_main_host_name $new_main_host_name old_primary_node_id $old_primary_node_id new_main_port_number $new_main_port_number new_main_db_cluster $new_main_db_cluster" >> $log
- if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # master failed
- ! new_primary_db_cluster=${mydir}/data"$new_master_id"
+ if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # main failed
+ ! new_primary_db_cluster=${mydir}/data"$new_main_id"
echo $pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over
$pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over
sleep 2
@@ -349,7 +349,7 @@
DATADIR_BASE=/home/t-ishii/tmp/Tutorial
PGSUPERUSER=t-ishii
- master_db_cluster=$1
+ main_db_cluster=$1
recovery_node_host_name=$2
DEST_CLUSTER=$3
PORT=$4
@@ -364,13 +364,13 @@
$psql -p $PORT -c "SELECT pg_start_backup('Streaming Replication', true)" postgres
- echo "source: $master_db_cluster dest: $DEST_CLUSTER" >> $log
+ echo "source: $main_db_cluster dest: $DEST_CLUSTER" >> $log
rsync -C -a -c --delete --exclude postgresql.conf --exclude postmaster.pid \
--exclude postmaster.opts --exclude pg_log \
--exclude recovery.conf --exclude recovery.done \
--exclude pg_xlog \
- $master_db_cluster/ $DEST_CLUSTER/
+ $main_db_cluster/ $DEST_CLUSTER/
rm -fr $DEST_CLUSTER/pg_xlog
mkdir $DEST_CLUSTER/pg_xlog
diff --git a/doc/src/sgml/watchdog.sgml b/doc/src/sgml/watchdog.sgml
index a2e205f..be177c8 100644
--- a/doc/src/sgml/watchdog.sgml
+++ b/doc/src/sgml/watchdog.sgml
@@ -520,10 +520,10 @@
Although there are many similarities in quarantine and failover operations, but they both differ in a very
fundamental way. The quarantine operations does not executes the
- and silently detaches the problematic node, So in the case when the master backend node is quarantined, the
- Pgpool-II will not promote the standby to take over the master responsibilities
- and until the master node is quarantined the Pgpool-II will not have
- any usable master backend node.
+ and silently detaches the problematic node, So in the case when the main backend node is quarantined, the
+ Pgpool-II will not promote the standby to take over the main node responsibilities
+ and until the main node is quarantined the Pgpool-II will not have
+ any usable main backend node.
Moreover, unlike for the failed nodes,
diff --git a/src/Makefile.am b/src/Makefile.am
index 0b18141..deb8392 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -79,7 +79,7 @@ sysconf_DATA = sample/pgpool.conf.sample \
sample/pgpool.conf.sample-raw \
sample/pgpool.conf.sample-snapshot \
sample/scripts/failover.sh.sample \
- sample/scripts/follow_master.sh.sample \
+ sample/scripts/follow_primary.sh.sample \
sample/scripts/pgpool_remote_start.sample \
sample/scripts/recovery_1st_stage.sample \
sample/scripts/recovery_2nd_stage.sample
@@ -105,7 +105,7 @@ AM_YFLAGS = -d
EXTRA_DIST = sample/pgpool.pam \
sample/scripts/failover.sh.sample \
- sample/scripts/follow_master.sh.sample \
+ sample/scripts/follow_primary.sh.sample \
sample/scripts/pgpool_remote_start.sample \
sample/scripts/recovery_1st_stage.sample \
sample/scripts/recovery_2nd_stage.sample \
diff --git a/src/Makefile.in b/src/Makefile.in
index d5535f3..4a678f1 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -507,7 +507,7 @@ sysconf_DATA = sample/pgpool.conf.sample \
sample/pgpool.conf.sample-raw \
sample/pgpool.conf.sample-snapshot \
sample/scripts/failover.sh.sample \
- sample/scripts/follow_master.sh.sample \
+ sample/scripts/follow_primary.sh.sample \
sample/scripts/pgpool_remote_start.sample \
sample/scripts/recovery_1st_stage.sample \
sample/scripts/recovery_2nd_stage.sample
@@ -528,7 +528,7 @@ pgpool_LDADD = -L@PGSQL_LIB_DIR@ -lpq parser/libsql-parser.a \
AM_YFLAGS = -d
EXTRA_DIST = sample/pgpool.pam \
sample/scripts/failover.sh.sample \
- sample/scripts/follow_master.sh.sample \
+ sample/scripts/follow_primary.sh.sample \
sample/scripts/pgpool_remote_start.sample \
sample/scripts/recovery_1st_stage.sample \
sample/scripts/recovery_2nd_stage.sample \
diff --git a/src/config/pool_config_variables.c b/src/config/pool_config_variables.c
index 4156fde..b5fab2e 100644
--- a/src/config/pool_config_variables.c
+++ b/src/config/pool_config_variables.c
@@ -934,11 +934,11 @@ static struct config_string ConfigureNamesString[] =
},
{
- {"follow_master_command", CFGCXT_RELOAD, FAILOVER_CONFIG,
- "Command to execute in master/slave streaming replication mode after a master node failover.",
+ {"follow_primary_command", CFGCXT_RELOAD, FAILOVER_CONFIG,
+ "Command to execute in streaming replication mode after a primary node failover.",
CONFIG_VAR_TYPE_STRING, false, 0
},
- &g_pool_config.follow_master_command,
+ &g_pool_config.follow_primary_command,
"",
NULL, NULL, NULL, NULL
},
diff --git a/src/context/pool_query_context.c b/src/context/pool_query_context.c
index f67dc21..d82923e 100644
--- a/src/context/pool_query_context.c
+++ b/src/context/pool_query_context.c
@@ -483,7 +483,7 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
*/
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
- else if (MASTER_SLAVE)
+ else if (NATIVE_REPLICATION)
{
POOL_DEST dest;
diff --git a/src/include/pcp/pcp.h b/src/include/pcp/pcp.h
index 6df3eb1..aa7bd89 100644
--- a/src/include/pcp/pcp.h
+++ b/src/include/pcp/pcp.h
@@ -54,8 +54,8 @@ typedef struct PCPWDClusterInfo
int quorumStatus;
int aliveNodeCount;
bool escalated;
- char masterNodeName[WD_MAX_HOST_NAMELEN];
- char masterHostName[WD_MAX_HOST_NAMELEN];
+ char leaderNodeName[WD_MAX_HOST_NAMELEN];
+ char leaderHostName[WD_MAX_HOST_NAMELEN];
int nodeCount;
PCPWDNodeInfo nodeList[1];
} PCPWDClusterInfo;
diff --git a/src/include/pool_config.h b/src/include/pool_config.h
index 86085ca..5020aa4 100644
--- a/src/include/pool_config.h
+++ b/src/include/pool_config.h
@@ -252,7 +252,7 @@ typedef struct
bool load_balance_mode; /* load balance mode */
bool replication_stop_on_mismatch; /* if there's a data mismatch
- * between master and
+ * between primary and
* secondary start
* degeneration to stop
* replication mode */
@@ -328,7 +328,7 @@ typedef struct
char *sr_check_database; /* PostgreSQL database name for streaming
* replication check */
char *failover_command; /* execute command when failover happens */
- char *follow_master_command; /* execute command when failover is
+ char *follow_primary_command; /* execute command when failover is
* ended */
char *failback_command; /* execute command when failback happens */
diff --git a/src/include/protocol/pool_proto_modules.h b/src/include/protocol/pool_proto_modules.h
index 0d84e89..8191e84 100644
--- a/src/include/protocol/pool_proto_modules.h
+++ b/src/include/protocol/pool_proto_modules.h
@@ -161,8 +161,8 @@ extern int check_copy_from_stdin(Node *node); /* returns non 0 if this is a
extern void query_ps_status(char *query, POOL_CONNECTION_POOL * backend); /* show ps status */
extern POOL_STATUS start_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node *node);
extern POOL_STATUS end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern int detect_deadlock_error(POOL_CONNECTION * master, int major);
-extern int detect_serialization_error(POOL_CONNECTION * master, int major, bool unread);
+extern int detect_deadlock_error(POOL_CONNECTION * backend, int major);
+extern int detect_serialization_error(POOL_CONNECTION * backend, int major, bool unread);
extern int detect_active_sql_transaction_error(POOL_CONNECTION * backend, int major);
extern int detect_query_cancel_error(POOL_CONNECTION * backend, int major);
extern int detect_idle_in_transaction_sesion_timeout_error(POOL_CONNECTION * backend, int major);
diff --git a/src/libs/pcp/pcp.c b/src/libs/pcp/pcp.c
index 865dd02..44e16ba 100644
--- a/src/libs/pcp/pcp.c
+++ b/src/libs/pcp/pcp.c
@@ -1424,7 +1424,7 @@ pcp_recovery_node(PCPConnInfo * pcpConn, int nid)
}
/* --------------------------------
- * pcp_promote_node - promote a node given by the argument as new pgpool's master
+ * pcp_promote_node - promote a node given by the argument as new pgpool's main node
*
* return 0 on success, -1 otherwise
* --------------------------------
@@ -1437,7 +1437,7 @@ pcp_promote_node(PCPConnInfo * pcpConn, int nid)
/* --------------------------------
- * and promote a node given by the argument as new pgpool's master
+ * and promote a node given by the argument as new pgpool's main node
*
* return 0 on success, -1 otherwise
* --------------------------------
@@ -1563,21 +1563,21 @@ process_watchdog_info_response(PCPConnInfo * pcpConn, char *buf, int len)
}
wd_cluster_info->escalated = tempVal == 0 ? false : true;
- ptr = json_get_string_value_for_key(root, "MasterNodeName");
+ ptr = json_get_string_value_for_key(root, "LeaderNodeName");
if (ptr == NULL)
{
json_value_free(root);
goto INVALID_RESPONSE;
}
- strncpy(wd_cluster_info->masterNodeName, ptr, sizeof(wd_cluster_info->masterNodeName) - 1);
+ strncpy(wd_cluster_info->leaderNodeName, ptr, sizeof(wd_cluster_info->leaderNodeName) - 1);
- ptr = json_get_string_value_for_key(root, "MasterHostName");
+ ptr = json_get_string_value_for_key(root, "LeaderHostName");
if (ptr == NULL)
{
json_value_free(root);
goto INVALID_RESPONSE;
}
- strncpy(wd_cluster_info->masterHostName, ptr, sizeof(wd_cluster_info->masterHostName) - 1);
+ strncpy(wd_cluster_info->leaderHostName, ptr, sizeof(wd_cluster_info->leaderHostName) - 1);
/* Get watchdog nodes data */
for (i = 0; i < nodeCount; i++)
diff --git a/src/main/pgpool_main.c b/src/main/pgpool_main.c
index 21ef504..21f1ad5 100644
--- a/src/main/pgpool_main.c
+++ b/src/main/pgpool_main.c
@@ -125,7 +125,7 @@ static void wakeup_children(void);
static void reload_config(void);
static int pool_pause(struct timeval *timeout);
static void kill_all_children(int sig);
-static pid_t fork_follow_child(int old_master, int new_primary, int old_primary);
+static pid_t fork_follow_child(int old_main_node, int new_primary, int old_primary);
static int read_status_file(bool discard_status);
static RETSIGTYPE exit_handler(int sig);
static RETSIGTYPE reap_handler(int sig);
@@ -136,7 +136,7 @@ static RETSIGTYPE wakeup_handler(int sig);
static void initialize_shared_mem_objects(bool clear_memcache_oidmaps);
static int trigger_failover_command(int node, const char *command_line,
- int old_master, int new_master, int old_primary);
+ int old_main_node, int new_main_node, int old_primary);
static int find_primary_node(void);
static int find_primary_node_repeatedly(void);
static void terminate_all_childrens(int sig);
@@ -1163,7 +1163,7 @@ get_next_main_node(void)
{
/*
* Do not use VALID_BACKEND macro in raw mode. VALID_BACKEND return
- * true only if the argument is master node id. In other words,
+ * true only if the argument is main node id. In other words,
* standby nodes are false. So need to check backend status with
* VALID_BACKEND_RAW.
*/
@@ -1318,7 +1318,7 @@ failover(void)
j,
k;
int node_id;
- int new_master;
+ int new_main_node;
int new_primary = -1;
int nodes[MAX_NUM_BACKENDS];
bool need_to_restart_children = true;
@@ -1420,7 +1420,7 @@ failover(void)
*/
ereport(DEBUG1,
(errmsg("failover handler"),
- errdetail("starting to select new master node")));
+ errdetail("starting to select new main node")));
node_id = node_id_set[0];
/* failback request? */
@@ -1596,9 +1596,9 @@ failover(void)
}
}
- new_master = get_next_main_node();
+ new_main_node = get_next_main_node();
- if (new_master < 0)
+ if (new_main_node < 0)
{
ereport(LOG,
(errmsg("failover: no valid backend node found")));
@@ -1732,7 +1732,7 @@ failover(void)
if (nodes[i])
{
trigger_failover_command(i, pool_config->failover_command,
- MAIN_NODE_ID, new_master, REAL_PRIMARY_NODE_ID);
+ MAIN_NODE_ID, new_main_node, REAL_PRIMARY_NODE_ID);
sync_required = true;
}
}
@@ -1796,7 +1796,7 @@ failover(void)
}
/*
- * If follow_master_command is provided and in master/slave streaming
+ * If follow_primary_command is provided and in streaming
* replication mode, we start degenerating all backends as they are
* not replicated anymore.
*/
@@ -1804,7 +1804,7 @@ failover(void)
if (STREAM)
{
- if (*pool_config->follow_master_command != '\0' ||
+ if (*pool_config->follow_primary_command != '\0' ||
reqkind == PROMOTE_NODE_REQUEST)
{
/* only if the failover is against the current primary */
@@ -1842,8 +1842,8 @@ failover(void)
}
else
{
- /* update new master node */
- new_master = get_next_main_node();
+ /* update new main node */
+ new_main_node = get_next_main_node();
ereport(LOG,
(errmsg("failover: %d follow backends have been degenerated", follow_cnt)));
}
@@ -1851,9 +1851,9 @@ failover(void)
}
}
- if ((follow_cnt > 0) && (*pool_config->follow_master_command != '\0'))
+ if ((follow_cnt > 0) && (*pool_config->follow_primary_command != '\0'))
{
- follow_pid = fork_follow_child(Req_info->master_node_id, new_primary,
+ follow_pid = fork_follow_child(Req_info->main_node_id, new_primary,
Req_info->primary_node_id);
}
@@ -1874,12 +1874,12 @@ failover(void)
ereport(LOG,
(errmsg("failover: set new primary node: %d", Req_info->primary_node_id)));
- if (new_master >= 0)
+ if (new_main_node >= 0)
{
- Req_info->main_node_id = new_master;
+ Req_info->main_node_id = new_main_node;
sync_required = true;
ereport(LOG,
- (errmsg("failover: set new master node: %d", Req_info->main_node_id)));
+ (errmsg("failover: set new main node: %d", Req_info->main_node_id)));
}
@@ -2651,14 +2651,14 @@ pool_sleep(unsigned int second)
*/
static int
trigger_failover_command(int node, const char *command_line,
- int old_master, int new_master, int old_primary)
+ int old_main_node, int new_main_node, int old_primary)
{
int r = 0;
String *exec_cmd;
char port_buf[6];
char buf[2];
BackendInfo *info;
- BackendInfo *newmaster;
+ BackendInfo *newmain;
BackendInfo *oldprimary;
if (command_line == NULL || (strlen(command_line) == 0))
@@ -2703,43 +2703,43 @@ trigger_failover_command(int node, const char *command_line,
string_append_char(exec_cmd, info->backend_hostname);
break;
- case 'H': /* new master host name */
- newmaster = pool_get_node_info(new_master);
- if (newmaster)
- string_append_char(exec_cmd, newmaster->backend_hostname);
+ case 'H': /* new main host name */
+ newmain = pool_get_node_info(new_main_node);
+ if (newmain)
+ string_append_char(exec_cmd, newmain->backend_hostname);
else
- /* no valid new master */
+ /* no valid new main */
string_append_char(exec_cmd, "\"\"");
break;
- case 'm': /* new master node id */
- snprintf(port_buf, sizeof(port_buf), "%d", new_master);
+ case 'm': /* new main node id */
+ snprintf(port_buf, sizeof(port_buf), "%d", new_main_node);
string_append_char(exec_cmd, port_buf);
break;
- case 'r': /* new master port */
- newmaster = pool_get_node_info(get_next_main_node());
- if (newmaster)
+ case 'r': /* new main node port */
+ newmain = pool_get_node_info(get_next_main_node());
+ if (newmain)
{
- snprintf(port_buf, sizeof(port_buf), "%d", newmaster->backend_port);
+ snprintf(port_buf, sizeof(port_buf), "%d", newmain->backend_port);
string_append_char(exec_cmd, port_buf);
}
else
- /* no valid new master */
+ /* no valid new main node */
string_append_char(exec_cmd, "\"\"");
break;
- case 'R': /* new master database directory */
- newmaster = pool_get_node_info(get_next_main_node());
- if (newmaster)
- string_append_char(exec_cmd, newmaster->backend_data_directory);
+ case 'R': /* new main database directory */
+ newmain = pool_get_node_info(get_next_main_node());
+ if (newmain)
+ string_append_char(exec_cmd, newmain->backend_data_directory);
else
- /* no valid new master */
+ /* no valid new main */
string_append_char(exec_cmd, "\"\"");
break;
- case 'M': /* old master node id */
- snprintf(port_buf, sizeof(port_buf), "%d", old_master);
+ case 'M': /* old main node id */
+ snprintf(port_buf, sizeof(port_buf), "%d", old_main_node);
string_append_char(exec_cmd, port_buf);
break;
@@ -3230,7 +3230,7 @@ find_primary_node_repeatedly(void)
* fork a follow child
*/
pid_t
-fork_follow_child(int old_master, int new_primary, int old_primary)
+fork_follow_child(int old_main_node, int new_primary, int old_primary)
{
pid_t pid;
int i;
@@ -3249,8 +3249,8 @@ fork_follow_child(int old_master, int new_primary, int old_primary)
bkinfo = pool_get_node_info(i);
if (bkinfo->backend_status == CON_DOWN)
- trigger_failover_command(i, pool_config->follow_master_command,
- old_master, new_primary, old_primary);
+ trigger_failover_command(i, pool_config->follow_primary_command,
+ old_main_node, new_primary, old_primary);
}
exit(0);
}
diff --git a/src/pcp_con/recovery.c b/src/pcp_con/recovery.c
index 809a046..34f746f 100644
--- a/src/pcp_con/recovery.c
+++ b/src/pcp_con/recovery.c
@@ -45,7 +45,7 @@
#define SECOND_STAGE 1
static void exec_checkpoint(PGconn *conn);
-static void exec_recovery(PGconn *conn, BackendInfo * master_backend, BackendInfo * recovery_backend, char stage, int recovery_node);
+static void exec_recovery(PGconn *conn, BackendInfo * main_backend, BackendInfo * recovery_backend, char stage, int recovery_node);
static void exec_remote_start(PGconn *conn, BackendInfo * backend);
static PGconn *connect_backend_libpq(BackendInfo * backend);
static void check_postmaster_started(BackendInfo * backend);
@@ -234,7 +234,7 @@ exec_checkpoint(PGconn *conn)
* Call pgpool_recovery() function.
*/
static void
-exec_recovery(PGconn *conn, BackendInfo * master_backend, BackendInfo * recovery_backend, char stage, int recovery_node)
+exec_recovery(PGconn *conn, BackendInfo * main_backend, BackendInfo * recovery_backend, char stage, int recovery_node)
{
PGresult *result;
char *hostname;
@@ -263,7 +263,7 @@ exec_recovery(PGconn *conn, BackendInfo * master_backend, BackendInfo * recovery
script,
hostname,
recovery_backend->backend_data_directory,
- master_backend->backend_port,
+ main_backend->backend_port,
recovery_node,
recovery_backend->backend_port
);
diff --git a/src/pgpool.spec b/src/pgpool.spec
index 9daf31f..211d0a0 100644
--- a/src/pgpool.spec
+++ b/src/pgpool.spec
@@ -269,7 +269,7 @@ fi
%{_sysconfdir}/%{short_name}/pool_hba.conf.sample
%defattr(755,postgres,postgres,-)
%{_sysconfdir}/%{short_name}/failover.sh.sample
-%{_sysconfdir}/%{short_name}/follow_master.sh.sample
+%{_sysconfdir}/%{short_name}/follow_primary.sh.sample
%{_sysconfdir}/%{short_name}/pgpool_remote_start.sample
%{_sysconfdir}/%{short_name}/recovery_1st_stage.sample
%{_sysconfdir}/%{short_name}/recovery_2nd_stage.sample
diff --git a/src/protocol/pool_process_query.c b/src/protocol/pool_process_query.c
index 052f56b..43bb458 100644
--- a/src/protocol/pool_process_query.c
+++ b/src/protocol/pool_process_query.c
@@ -83,15 +83,15 @@ static int reset_backend(POOL_CONNECTION_POOL * backend, int qcnt);
static char *get_insert_command_table_name(InsertStmt *node);
static bool is_cache_empty(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
static bool is_panic_or_fatal_error(char *message, int major);
-static int extract_message(POOL_CONNECTION * master, char *error_code, int major, char class, bool unread);
-static int detect_postmaster_down_error(POOL_CONNECTION * master, int major);
+static int extract_message(POOL_CONNECTION * backend, char *error_code, int major, char class, bool unread);
+static int detect_postmaster_down_error(POOL_CONNECTION * backend, int major);
static bool is_internal_transaction_needed(Node *node);
static bool pool_has_insert_lock(void);
static POOL_STATUS add_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table);
static bool has_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table, bool for_update);
static POOL_STATUS insert_oid_into_insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table);
static POOL_STATUS read_packets_and_process(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int reset_request, int *state, short *num_fields, bool *cont);
-static bool is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int master);
+static bool is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int main_node);
static bool pool_process_notice_message_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int backend_idx, char kind);
/*
@@ -297,7 +297,7 @@ pool_process_query(POOL_CONNECTION * frontend,
else
{
/*
- * If we have pending data in master, we need to process
+ * If we have pending data in main, we need to process
* it
*/
if (pool_ssl_pending(MAIN(backend)) ||
@@ -318,7 +318,7 @@ pool_process_query(POOL_CONNECTION * frontend,
!pool_read_buffer_is_empty(CONNECTION(backend, i)))
{
/*
- * If we have pending data in master, we need
+ * If we have pending data in main, we need
* to process it
*/
if (IS_MAIN_NODE_ID(i))
@@ -335,7 +335,7 @@ pool_process_query(POOL_CONNECTION * frontend,
char *string;
/*
- * If master does not have pending data,
+ * If main does not have pending data,
* we discard one packet from other
* backend
*/
@@ -358,7 +358,7 @@ pool_process_query(POOL_CONNECTION * frontend,
* MAIN_NODE_ID will be pointing
* to the standby node. And we
* will get stuck if we keep
- * waiting for the current master
+ * waiting for the current main
* node (standby) in this case to
* send us the NOTIFY message. see
* "0000116: LISTEN Notifications
@@ -387,16 +387,16 @@ pool_process_query(POOL_CONNECTION * frontend,
* sent to all backends. However
* the order of arrival of
* 'Notification response' is not
- * necessarily the master first
+ * necessarily the main first
* and then standbys. So if it
* arrives standby first, we should
- * try to read from master, rather
+ * try to read from main, rather
* than just discard it.
*/
pool_unread(CONNECTION(backend, i), &kind, sizeof(kind));
ereport(LOG,
(errmsg("pool process query"),
- errdetail("received %c packet from backend %d. Don't dicard and read %c packet from master", kind, i, kind)));
+ errdetail("received %c packet from backend %d. Don't discard and read %c packet from main", kind, i, kind)));
pool_read_with_error(CONNECTION(backend, MAIN_NODE_ID), &kind, sizeof(kind),
"reading message kind from backend");
@@ -755,7 +755,7 @@ SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
if (len != len1)
{
ereport(DEBUG1,
- (errmsg("SimpleForwardToFrontend: length does not match between backends master(%d) %d th backend(%d) kind:(%c)",
+ (errmsg("SimpleForwardToFrontend: length does not match between backends main(%d) %d th backend(%d) kind:(%c)",
len, i, len1, kind)));
}
}
@@ -1613,7 +1613,7 @@ retry_read_packet:
* from backend and discard it until we get Error response.
*
* We need to sync transaction status in transaction block.
- * SELECT query is sent to master only.
+ * SELECT query is sent to main node only.
* If SELECT is error, we must abort transaction on other nodes.
*/
void
@@ -1667,11 +1667,11 @@ do_error_command(POOL_CONNECTION * backend, int major)
/*
* Send invalid portal execution to specified DB node to abort current
- * transaction. Pgpool-II sends a SELECT query to master node only in
- * load balance mode. Problem is, if the query failed, master node
+ * transaction. Pgpool-II sends a SELECT query to main node only in
+ * load balance mode. Problem is, if the query failed, main node
* goes to abort status while other nodes remain normal status. To
* sync transaction status in each node, we send error query to other
- * than master node to ket them go into abort status.
+ * than main node to let them go into abort status.
*/
void
do_error_execute_command(POOL_CONNECTION_POOL * backend, int node_id, int major)
@@ -3159,14 +3159,14 @@ read_kind_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * ba
* returns true if all standbys status are 'C' (Command Complete)
*/
static bool
-is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int master)
+is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int main_node)
{
int i;
int ok = true;
for (i = 0; i < num_backends; i++)
{
- if (i == master || kind_list[i] == 0)
+ if (i == main_node || kind_list[i] == 0)
continue;
if (kind_list[i] != 'C')
{
@@ -3281,7 +3281,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
{
ereport(DEBUG5,
(errmsg("reading backend data packet kind"),
- errdetail("master node id: %d", MAIN_NODE_ID)));
+ errdetail("main node id: %d", MAIN_NODE_ID)));
read_kind_from_one_backend(frontend, backend, (char *) &kind, MAIN_NODE_ID);
@@ -3296,7 +3296,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
ereport(DEBUG5,
(errmsg("reading backend data packet kind"),
- errdetail("received notification message for master node %d",
+ errdetail("received notification message for main node %d",
MAIN_NODE_ID)));
if (msg)
pool_pending_message_free_pending_message(msg);
@@ -3458,7 +3458,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
/*
* If we are in streaming replication mode and kind = 'Z' (ready for
- * query) on master and kind on standby is not 'Z', it is likely that
+ * query) on primary and kind on standby is not 'Z', it is likely that
* following scenario happened.
*
* FE=>Parse("BEGIN") FE=>Bind FE=>Execute("BEGIN");
@@ -3505,8 +3505,8 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
* cases it is possible that similar issue could happen since returned
* messages do not follow the sequence recorded in the pending
* messages because the backend ignores requests till sync message is
- * received. In this case we need to re-sync either master or standby.
- * So we check not only the standby but master node.
+ * received. In this case we need to re-sync either primary or standby.
+ * So we check not only the standby but primary node.
*/
if (session_context->load_balance_node_id != MAIN_NODE_ID &&
(kind_list[MAIN_NODE_ID] == 'Z' ||
@@ -3604,7 +3604,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
}
else if (max_count <= NUM_BACKENDS / 2.0)
{
- /* no one gets majority. We trust master node's kind */
+ /* no one gets majority. We trust main node's kind */
trust_kind = kind_list[MAIN_NODE_ID];
}
else /* max_count > NUM_BACKENDS / 2.0 */
@@ -3622,7 +3622,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
{
/* degenerate */
ereport(WARNING,
- (errmsg("packet kind of backend %d ['%c'] does not match with master/majority nodes packet kind ['%c']", i, kind_list[i], trust_kind)));
+ (errmsg("packet kind of backend %d ['%c'] does not match with main/majority nodes packet kind ['%c']", i, kind_list[i], trust_kind)));
degenerate_node[degenerate_node_num++] = i;
}
}
@@ -4077,7 +4077,7 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back
PG_TRY();
{
- /* We need to commit from secondary to master. */
+ /* We need to commit from secondary to primary. */
for (i = 0; i < NUM_BACKENDS; i++)
{
if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i) &&
@@ -4115,7 +4115,7 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back
}
}
- /* Commit on master */
+ /* Commit on main */
if (TSTATE(backend, MAIN_NODE_ID) != 'I' &&
INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID))
{
@@ -5037,7 +5037,7 @@ pool_push_pending_data(POOL_CONNECTION * backend)
/*
* If we have not send the flush message to load balance node yet, send a
* flush message to the load balance node. Otherwise only the non load
- * balance node (usually the master node) produces response if we do not
+ * balance node (usually the primary node) produces response if we do not
* send sync message to it yet.
*/
session_context = pool_get_session_context(false);
diff --git a/src/sample/pgpool.conf.sample-logical b/src/sample/pgpool.conf.sample-logical
index db0857c..3fd624d 100644
--- a/src/sample/pgpool.conf.sample-logical
+++ b/src/sample/pgpool.conf.sample-logical
@@ -423,19 +423,19 @@ delay_threshold = 10000000
# - Special commands -
-follow_master_command = ''
- # Executes this command after master failover
+follow_primary_command = ''
+ # Executes this command after main node failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -493,12 +493,12 @@ failover_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -509,12 +509,12 @@ failback_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -806,10 +806,10 @@ check_temp_table = catalog
check_unlogged_table = on
# If on, enable unlogged table check in SELECT statements.
- # This initiates queries against system catalog of primary/master
- # thus increases load of master.
+ # This initiates queries against system catalog of primary/main
+ # thus increases load of primary.
# If you are absolutely sure that your system never uses unlogged tables
- # and you want to save access to primary/master, you could turn this off.
+ # and you want to save access to primary/main, you could turn this off.
# Default is on.
enable_shared_relcache = on
# If on, relation cache stored in memory cache,
diff --git a/src/sample/pgpool.conf.sample-raw b/src/sample/pgpool.conf.sample-raw
index fd04ceb..c250556 100644
--- a/src/sample/pgpool.conf.sample-raw
+++ b/src/sample/pgpool.conf.sample-raw
@@ -463,19 +463,19 @@ delay_threshold = 10000000
# - Special commands -
-follow_master_command = ''
- # Executes this command after master failover
+follow_primary_command = ''
+ # Executes this command after main node failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -533,12 +533,12 @@ failover_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -549,12 +549,12 @@ failback_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -850,10 +850,10 @@ check_temp_table = catalog
check_unlogged_table = on
# If on, enable unlogged table check in SELECT statements.
- # This initiates queries against system catalog of primary/master
- # thus increases load of master.
+ # This initiates queries against system catalog of primary/main
+ # thus increases load of primary.
# If you are absolutely sure that your system never uses unlogged tables
- # and you want to save access to primary/master, you could turn this off.
+ # and you want to save access to primary/main, you could turn this off.
# Default is on.
enable_shared_relcache = on
# If on, relation cache stored in memory cache,
diff --git a/src/sample/pgpool.conf.sample-replication b/src/sample/pgpool.conf.sample-replication
index 6525aa7..5a22093 100644
--- a/src/sample/pgpool.conf.sample-replication
+++ b/src/sample/pgpool.conf.sample-replication
@@ -459,19 +459,19 @@ delay_threshold = 0
# - Special commands -
-follow_master_command = ''
- # Executes this command after master failover
+follow_primary_command = ''
+ # Executes this command after main node failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -529,12 +529,12 @@ failover_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -545,12 +545,12 @@ failback_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -850,10 +850,10 @@ check_temp_table = catalog
check_unlogged_table = on
# If on, enable unlogged table check in SELECT statements.
- # This initiates queries against system catalog of primary/master
- # thus increases load of master.
+ # This initiates queries against system catalog of primary/main
+ # thus increases load of primary.
# If you are absolutely sure that your system never uses unlogged tables
- # and you want to save access to primary/master, you could turn this off.
+ # and you want to save access to primary/main, you could turn this off.
# Default is on.
enable_shared_relcache = on
# If on, relation cache stored in memory cache,
diff --git a/src/sample/pgpool.conf.sample-slony b/src/sample/pgpool.conf.sample-slony
index b6114d7..de9fc91 100644
--- a/src/sample/pgpool.conf.sample-slony
+++ b/src/sample/pgpool.conf.sample-slony
@@ -460,19 +460,19 @@ delay_threshold = 0
# - Special commands -
-follow_master_command = ''
- # Executes this command after master failover
+follow_primary_command = ''
+ # Executes this command after main node failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -530,12 +530,12 @@ failover_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -546,12 +546,12 @@ failback_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -849,10 +849,10 @@ check_temp_table = catalog
check_unlogged_table = on
# If on, enable unlogged table check in SELECT statements.
- # This initiates queries against system catalog of primary/master
- # thus increases load of master.
+ # This initiates queries against system catalog of primary/main
+ # thus increases load of primary.
# If you are absolutely sure that your system never uses unlogged tables
- # and you want to save access to primary/master, you could turn this off.
+ # and you want to save access to primary/main, you could turn this off.
# Default is on.
enable_shared_relcache = on
# If on, relation cache stored in memory cache,
diff --git a/src/sample/pgpool.conf.sample-snapshot b/src/sample/pgpool.conf.sample-snapshot
index f4d59f6..e8769eb 100644
--- a/src/sample/pgpool.conf.sample-snapshot
+++ b/src/sample/pgpool.conf.sample-snapshot
@@ -457,19 +457,19 @@ delay_threshold = 0
# - Special commands -
-follow_master_command = ''
- # Executes this command after master failover
+follow_primary_command = ''
+ # Executes this command after main node failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -527,12 +527,12 @@ failover_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -543,12 +543,12 @@ failback_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -832,10 +832,10 @@ check_temp_table = catalog
check_unlogged_table = on
# If on, enable unlogged table check in SELECT statements.
- # This initiates queries against system catalog of primary/master
- # thus increases load of master.
+ # This initiates queries against system catalog of primary/main
+ # thus increases load of primary.
# If you are absolutely sure that your system never uses unlogged tables
- # and you want to save access to primary/master, you could turn this off.
+ # and you want to save access to primary/main, you could turn this off.
# Default is on.
enable_shared_relcache = on
# If on, relation cache stored in memory cache,
diff --git a/src/sample/pgpool.conf.sample-stream b/src/sample/pgpool.conf.sample-stream
index 3cac2d7..6ca78f5 100644
--- a/src/sample/pgpool.conf.sample-stream
+++ b/src/sample/pgpool.conf.sample-stream
@@ -462,19 +462,19 @@ delay_threshold = 10000000
# - Special commands -
-follow_master_command = ''
- # Executes this command after master failover
+follow_primary_command = ''
+ # Executes this command after main node failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -532,12 +532,12 @@ failover_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -548,12 +548,12 @@ failback_command = ''
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
- # %m = new master node id
- # %H = new master node hostname
- # %M = old master node id
+ # %m = new main node id
+ # %H = new main node hostname
+ # %M = old main node id
# %P = old primary node id
- # %r = new master port number
- # %R = new master database cluster path
+ # %r = new main port number
+ # %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -834,10 +834,10 @@ check_temp_table = catalog
check_unlogged_table = on
# If on, enable unlogged table check in SELECT statements.
- # This initiates queries against system catalog of primary/master
- # thus increases load of master.
+ # This initiates queries against system catalog of primary/main
+ # thus increases load of primary.
# If you are absolutely sure that your system never uses unlogged tables
- # and you want to save access to primary/master, you could turn this off.
+ # and you want to save access to primary/main, you could turn this off.
# Default is on.
enable_shared_relcache = on
# If on, relation cache stored in memory cache,
diff --git a/src/sample/pgpool_recovery b/src/sample/pgpool_recovery
index 1579f91..d69e77d 100644
--- a/src/sample/pgpool_recovery
+++ b/src/sample/pgpool_recovery
@@ -2,7 +2,7 @@
if [ $# -ne 4 ]
then
- echo "pgpool_recovery datadir remote_host remote_datadir master_port"
+ echo "pgpool_recovery datadir remote_host remote_datadir primary_port"
exit 1
fi
diff --git a/src/sample/scripts/failover.sh.sample b/src/sample/scripts/failover.sh.sample
index 4317ce0..8b4f460 100755
--- a/src/sample/scripts/failover.sh.sample
+++ b/src/sample/scripts/failover.sh.sample
@@ -9,12 +9,12 @@ exec > >(logger -i -p local1.info) 2>&1
# %h = failed node hostname
# %p = failed node port number
# %D = failed node database cluster path
-# %m = new master node id
-# %H = new master node hostname
-# %M = old master node id
+# %m = new main node id
+# %H = new main node hostname
+# %M = old main node id
# %P = old primary node id
-# %r = new master port number
-# %R = new master database cluster path
+# %r = new main port number
+# %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
@@ -23,31 +23,31 @@ FAILED_NODE_ID="$1"
FAILED_NODE_HOST="$2"
FAILED_NODE_PORT="$3"
FAILED_NODE_PGDATA="$4"
-NEW_MASTER_NODE_ID="$5"
-NEW_MASTER_NODE_HOST="$6"
-OLD_MASTER_NODE_ID="$7"
+NEW_MAIN_NODE_ID="$5"
+NEW_MAIN_NODE_HOST="$6"
+OLD_MAIN_NODE_ID="$7"
OLD_PRIMARY_NODE_ID="$8"
-NEW_MASTER_NODE_PORT="$9"
-NEW_MASTER_NODE_PGDATA="${10}"
+NEW_MAIN_NODE_PORT="$9"
+NEW_MAIN_NODE_PGDATA="${10}"
OLD_PRIMARY_NODE_HOST="${11}"
OLD_PRIMARY_NODE_PORT="${12}"
PGHOME=/usr/pgsql-11
-logger -i -p local1.info failover.sh: start: failed_node_id=$FAILED_NODE_ID old_primary_node_id=$OLD_PRIMARY_NODE_ID failed_host=$FAILED_NODE_HOST new_master_host=$NEW_MASTER_NODE_HOST
+logger -i -p local1.info failover.sh: start: failed_node_id=$FAILED_NODE_ID old_primary_node_id=$OLD_PRIMARY_NODE_ID failed_host=$FAILED_NODE_HOST new_main_host=$NEW_MAIN_NODE_HOST
-## If there's no master node anymore, skip failover.
-if [ $NEW_MASTER_NODE_ID -lt 0 ]; then
+## If there's no main node anymore, skip failover.
+if [ $NEW_MAIN_NODE_ID -lt 0 ]; then
logger -i -p local1.info failover.sh: All nodes are down. Skipping failover.
exit 0
fi
## Test passwrodless SSH
-ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MASTER_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ls /tmp > /dev/null
+ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ls /tmp > /dev/null
if [ $? -ne 0 ]; then
- logger -i -p local1.info failover.sh: passwrodless SSH to postgres@${NEW_MASTER_NODE_HOST} failed. Please setup passwrodless SSH.
+    logger -i -p local1.info failover.sh: passwordless SSH to postgres@${NEW_MAIN_NODE_HOST} failed. Please setup passwordless SSH.
exit 1
fi
@@ -68,15 +68,15 @@ if [ $FAILED_NODE_ID -ne $OLD_PRIMARY_NODE_ID ]; then
fi
## Promote Standby node.
-logger -i -p local1.info failover.sh: Primary node is down, promote standby node ${NEW_MASTER_NODE_HOST}.
+logger -i -p local1.info failover.sh: Primary node is down, promote standby node ${NEW_MAIN_NODE_HOST}.
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
- postgres@${NEW_MASTER_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ${PGHOME}/bin/pg_ctl -D ${NEW_MASTER_NODE_PGDATA} -w promote
+ postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ${PGHOME}/bin/pg_ctl -D ${NEW_MAIN_NODE_PGDATA} -w promote
if [ $? -ne 0 ]; then
- logger -i -p local1.error failover.sh: new_master_host=$NEW_MASTER_NODE_HOST promote failed
+ logger -i -p local1.error failover.sh: new_main_host=$NEW_MAIN_NODE_HOST promote failed
exit 1
fi
-logger -i -p local1.info failover.sh: end: new_master_node_id=$NEW_MASTER_NODE_ID started as the primary node
+logger -i -p local1.info failover.sh: end: new_main_node_id=$NEW_MAIN_NODE_ID started as the primary node
exit 0
diff --git a/src/sample/scripts/follow_primary.sh.sample b/src/sample/scripts/follow_primary.sh.sample
new file mode 100755
index 0000000..10939ca
--- /dev/null
+++ b/src/sample/scripts/follow_primary.sh.sample
@@ -0,0 +1,171 @@
+#!/bin/bash
+# This script is run after failover_command to synchronize the Standby with the new Primary.
+# First try pg_rewind. If pg_rewind failed, use pg_basebackup.
+
+set -o xtrace
+exec > >(logger -i -p local1.info) 2>&1
+
+# Special values:
+# %d = failed node id
+# %h = failed node hostname
+# %p = failed node port number
+# %D = failed node database cluster path
+# %m = new main node id
+# %H = new main node hostname
+# %M = old main node id
+# %P = old primary node id
+# %r = new main port number
+# %R = new main database cluster path
+# %N = old primary node hostname
+# %S = old primary node port number
+# %% = '%' character
+
+FAILED_NODE_ID="$1"
+FAILED_NODE_HOST="$2"
+FAILED_NODE_PORT="$3"
+FAILED_NODE_PGDATA="$4"
+NEW_MAIN_NODE_ID="$5"
+NEW_MAIN_NODE_HOST="$6"
+OLD_MAIN_NODE_ID="$7"
+OLD_PRIMARY_NODE_ID="$8"
+NEW_MAIN_NODE_PORT="$9"
+NEW_MAIN_NODE_PGDATA="${10}"
+
+PGHOME=/usr/pgsql-11
+ARCHIVEDIR=/var/lib/pgsql/archivedir
+REPLUSER=repl
+PCP_USER=pgpool
+PGPOOL_PATH=/usr/bin
+PCP_PORT=9898
+
+logger -i -p local1.info follow_primary.sh: start: Standby node ${FAILED_NODE_ID}
+
+## Test passwordless SSH
+ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ls /tmp > /dev/null
+
+if [ $? -ne 0 ]; then
+    logger -i -p local1.info follow_primary.sh: passwordless SSH to postgres@${NEW_MAIN_NODE_HOST} failed. Please setup passwordless SSH.
+ exit 1
+fi
+
+## Get PostgreSQL major version
+PGVERSION=`${PGHOME}/bin/initdb -V | awk '{print $3}' | sed 's/\..*//' | sed 's/\([0-9]*\)[a-zA-Z].*/\1/'`
+
+if [ $PGVERSION -ge 12 ]; then
+RECOVERYCONF=${FAILED_NODE_PGDATA}/myrecovery.conf
+else
+RECOVERYCONF=${FAILED_NODE_PGDATA}/recovery.conf
+fi
+
+## Check the status of Standby
+ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
+postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ${PGHOME}/bin/pg_ctl -w -D ${FAILED_NODE_PGDATA} status
+
+
+## If Standby is running, synchronize it with the new Primary.
+if [ $? -eq 0 ]; then
+
+ logger -i -p local1.info follow_primary.sh: pg_rewind for $FAILED_NODE_ID
+
+ # Create replication slot "${FAILED_NODE_HOST}"
+ ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool "
+ ${PGHOME}/bin/psql -p ${NEW_MAIN_NODE_PORT} -c \"SELECT pg_create_physical_replication_slot('${FAILED_NODE_HOST}');\"
+ "
+
+ ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool "
+
+ set -o errexit
+
+ ${PGHOME}/bin/pg_ctl -w -m f -D ${FAILED_NODE_PGDATA} stop
+
+ cat > ${RECOVERYCONF} << EOT
+primary_conninfo = 'host=${NEW_MAIN_NODE_HOST} port=${NEW_MAIN_NODE_PORT} user=${REPLUSER} application_name=${FAILED_NODE_HOST} passfile=''/var/lib/pgsql/.pgpass'''
+recovery_target_timeline = 'latest'
+restore_command = 'scp ${NEW_MAIN_NODE_HOST}:${ARCHIVEDIR}/%f %p'
+primary_slot_name = '${FAILED_NODE_HOST}'
+EOT
+
+ if [ ${PGVERSION} -ge 12 ]; then
+ touch ${FAILED_NODE_PGDATA}/standby.signal
+ else
+ echo \"standby_mode = 'on'\" >> ${RECOVERYCONF}
+ fi
+
+ ${PGHOME}/bin/pg_rewind -D ${FAILED_NODE_PGDATA} --source-server=\"user=postgres host=${NEW_MAIN_NODE_HOST} port=${NEW_MAIN_NODE_PORT}\"
+
+ "
+
+ if [ $? -ne 0 ]; then
+ logger -i -p local1.error follow_primary.sh: end: pg_rewind failed. Try pg_basebackup.
+
+ ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool "
+
+ set -o errexit
+
+ # Execute pg_basebackup
+ rm -rf ${FAILED_NODE_PGDATA}
+ rm -rf ${ARCHIVEDIR}/*
+ ${PGHOME}/bin/pg_basebackup -h ${NEW_MAIN_NODE_HOST} -U $REPLUSER -p ${NEW_MAIN_NODE_PORT} -D ${FAILED_NODE_PGDATA} -X stream
+
+ if [ ${PGVERSION} -ge 12 ]; then
+ sed -i -e \"\\\$ainclude_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'\" \
+ -e \"/^include_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'/d\" ${FAILED_NODE_PGDATA}/postgresql.conf
+ fi
+
+ cat > ${RECOVERYCONF} << EOT
+primary_conninfo = 'host=${NEW_MAIN_NODE_HOST} port=${NEW_MAIN_NODE_PORT} user=${REPLUSER} application_name=${FAILED_NODE_HOST} passfile=''/var/lib/pgsql/.pgpass'''
+recovery_target_timeline = 'latest'
+restore_command = 'scp ${NEW_MAIN_NODE_HOST}:${ARCHIVEDIR}/%f %p'
+primary_slot_name = '${FAILED_NODE_HOST}'
+EOT
+
+ if [ ${PGVERSION} -ge 12 ]; then
+ touch ${FAILED_NODE_PGDATA}/standby.signal
+ else
+ echo \"standby_mode = 'on'\" >> ${RECOVERYCONF}
+ fi
+ "
+
+ if [ $? -ne 0 ]; then
+ # drop replication slot
+ ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool "
+ ${PGHOME}/bin/psql -p ${NEW_MAIN_NODE_PORT} -c \"SELECT pg_drop_replication_slot('${FAILED_NODE_HOST}')\"
+ "
+
+ logger -i -p local1.error follow_primary.sh: end: pg_basebackup failed
+ exit 1
+ fi
+ fi
+
+ # start Standby node on ${FAILED_NODE_HOST}
+ ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
+ postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool $PGHOME/bin/pg_ctl -l /dev/null -w -D ${FAILED_NODE_PGDATA} start
+
+ # If start Standby successfully, attach this node
+ if [ $? -eq 0 ]; then
+
+        # Run pcp_attach_node to attach Standby node to Pgpool-II.
+ ${PGPOOL_PATH}/pcp_attach_node -w -h localhost -U $PCP_USER -p ${PCP_PORT} -n ${FAILED_NODE_ID}
+
+ if [ $? -ne 0 ]; then
+ logger -i -p local1.error follow_primary.sh: end: pcp_attach_node failed
+ exit 1
+ fi
+
+ # If start Standby failed, drop replication slot "${FAILED_NODE_HOST}"
+ else
+
+ ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool \
+ ${PGHOME}/bin/psql -p ${NEW_MAIN_NODE_PORT} -c "SELECT pg_drop_replication_slot('${FAILED_NODE_HOST}')"
+
+ logger -i -p local1.error follow_primary.sh: end: follow primary command failed
+ exit 1
+ fi
+
+else
+    logger -i -p local1.info follow_primary.sh: failed_node_id=${FAILED_NODE_ID} is not running. skipping follow primary command
+ exit 0
+fi
+
+logger -i -p local1.info follow_primary.sh: end: follow primary command complete
+exit 0
diff --git a/src/sample/scripts/recovery_2nd_stage.sample b/src/sample/scripts/recovery_2nd_stage.sample
index 64eae32..b110fc6 100755
--- a/src/sample/scripts/recovery_2nd_stage.sample
+++ b/src/sample/scripts/recovery_2nd_stage.sample
@@ -1,7 +1,7 @@
#! /bin/sh
# Online recovery 2nd stage script
#
-DATADIR=$1 # master dabatase cluster
+DATADIR=$1 # main database cluster
DEST=$2 # hostname of the DB node to be recovered
DESTDIR=$3 # database cluster of the DB node to be recovered
PORT=$4 # PostgreSQL port number
diff --git a/src/test/pgpool_setup.in b/src/test/pgpool_setup.in
index 2d64aa1..33caf32 100644
--- a/src/test/pgpool_setup.in
+++ b/src/test/pgpool_setup.in
@@ -131,8 +131,8 @@ fi
CONF=$BASEDIR/etc/pgpool.conf
# failover script
FAILOVER_SCRIPT=$BASEDIR/etc/failover.sh
-# follow master script
-FOLLOW_MASTER_SCRIPT=$BASEDIR/etc/follow_master.sh
+# follow primary script
+FOLLOW_PRIMARY_SCRIPT=$BASEDIR/etc/follow_primary.sh
# pgpool_remote_start
PGPOOL_REMOTE_START_SCRIPT=pgpool_remote_start
# Start script name. This will be generated in this script.
@@ -157,23 +157,23 @@ cat >> $FAILOVER_SCRIPT <<'EOF'
# %h = host name
# %p = port number
# %D = database cluster path
-# %m = new master node id
-# %M = old master node id
-# %H = new master node host name
+# %m = new main node id
+# %M = old main node id
+# %H = new main node host name
# %P = old primary node id
-# %R = new master database cluster path
-# %r = new master port number
+# %R = new main database cluster path
+# %r = new main port number
# %% = '%' character
failed_node_id=$1
failed_host_name=$2
failed_port=$3
failed_db_cluster=$4
-new_master_id=$5
-old_master_id=$6
-new_master_host_name=$7
+new_main_id=$5
+old_main_id=$6
+new_main_host_name=$7
old_primary_node_id=$8
-new_master_port_number=$9
-new_master_db_cluster=${10}
+new_main_port_number=$9
+new_main_db_cluster=${10}
mydir=__MYDIR__
log=$mydir/log/failover.log
pg_ctl=__PGBIN__/pg_ctl
@@ -181,10 +181,10 @@ cluster0=$mydir/data0
cluster1=$mydir/data1
date >> $log
-echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_master_id $new_master_id old_master_id $old_master_id new_master_host_name $new_master_host_name old_primary_node_id $old_primary_node_id new_master_port_number $new_master_port_number new_master_db_cluster $new_master_db_cluster" >> $log
+echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_main_id $new_main_id old_main_id $old_main_id new_main_host_name $new_main_host_name old_primary_node_id $old_primary_node_id new_main_port_number $new_main_port_number new_main_db_cluster $new_main_db_cluster" >> $log
-if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # master failed
-! new_primary_db_cluster=${mydir}/data"$new_master_id"
+if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # main node failed
+! new_primary_db_cluster=${mydir}/data"$new_main_id"
echo $pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over
$pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over
sleep 2
@@ -203,34 +203,34 @@ chmod 755 $FAILOVER_SCRIPT
}
#-------------------------------------------
-# create follow master script
+# create follow primary script
#-------------------------------------------
-function create_follow_master_script()
+function create_follow_primary_script()
{
-cat >> $FOLLOW_MASTER_SCRIPT <<'EOF'
+cat >> $FOLLOW_PRIMARY_SCRIPT <<'EOF'
#! /bin/sh
# Execute command by failover.
# special values: %d = node id
# %h = host name
# %p = port number
# %D = database cluster path
-# %m = new master node id
-# %M = old master node id
-# %H = new master node host name
+# %m = new main node id
+# %M = old main node id
+# %H = new main node host name
# %P = old primary node id
-# %R = new master database cluster path
-# %r = new master port number
+# %R = new main database cluster path
+# %r = new main port number
# %% = '%' character
failed_node_id=$1
failed_host_name=$2
failed_port=$3
failed_db_cluster=$4
-new_master_id=$5
-old_master_id=$6
-new_master_host_name=$7
+new_main_id=$5
+old_main_id=$6
+new_main_host_name=$7
old_primary_node_id=$8
-new_master_port_number=$9
-new_master_db_cluster=${10}
+new_main_port_number=$9
+new_main_db_cluster=${10}
mydir=__MYDIR__
log=$mydir/log/failover.log
pg_ctl=__PGBIN__/pg_ctl
@@ -241,7 +241,7 @@ PCP_PORT=__PCPPORT__
export PCPPASSFILE=__PCPPASSFILE__
date >> $log
-echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_master_id $new_master_id old_master_id $old_master_id new_master_host_name $new_master_host_name old_primary_node_id $old_primary_node_id new_master_port_number $new_master_port_number new_master_db_cluster $new_master_db_cluster" >> $log
+echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_main_id $new_main_id old_main_id $old_main_id new_main_host_name $new_main_host_name old_primary_node_id $old_primary_node_id new_main_port_number $new_main_port_number new_main_db_cluster $new_main_db_cluster" >> $log
# Stop standby node if it's running
$pg_ctl -D $failed_db_cluster status >/dev/null 2>&1
@@ -252,7 +252,7 @@ then
# recovery the node
pcp_recovery_node -w -h localhost -p $PCP_PORT -n $failed_node_id >> $log 2>&1
else
- echo "$failed_db_cluster is not running. skipping follow master command." >> $log
+ echo "$failed_db_cluster is not running. skipping follow primary command." >> $log
fi
EOF
@@ -265,9 +265,9 @@ EOF
-e "/__PGBIN__/s@__PGBIN__@$PGBIN@" \
-e "/__PCPPASSFILE__/s@__PCPPASSFILE__@$PCP_PASS_FILE@" \
-e "/__PCPPORT__/s/__PCPPORT__/$PCP_PORT/" \
- $FOLLOW_MASTER_SCRIPT
+ $FOLLOW_PRIMARY_SCRIPT
-chmod 755 $FOLLOW_MASTER_SCRIPT
+chmod 755 $FOLLOW_PRIMARY_SCRIPT
}
#-------------------------------------------
@@ -396,7 +396,7 @@ pg_rewind=__PGBIN__/pg_rewind
DATADIR_BASE=__DATADIR_BASE__
PGSUPERUSER=__PGSUPERUSER__
-master_db_cluster=$1
+main_db_cluster=$1
recovery_node_host_name=$2
DEST_CLUSTER=$3
PORT=$4
@@ -419,7 +419,7 @@ cat >> $1/$SCRIPT <<'EOF'
# First try pg_rewind
# Make backup copy of postgresql.conf since pg_rewind blindly copies
-# $master_db_cluster/postgresql.conf.
+# $main_db_cluster/postgresql.conf.
cp $DEST_CLUSTER/postgresql.conf /tmp/
echo "pg_rewind starts" >> $log
$pg_rewind -P -D $DEST_CLUSTER --source-server="port=$PORT user=$PGSUPERUSER dbname=postgres" >> $log 2>&1
@@ -438,13 +438,13 @@ if [ $pg_rewind_failed = "true" ];then
$psql -p $PORT -c "SELECT pg_start_backup('Streaming Replication', true)" postgres
-echo "source: $master_db_cluster dest: $DEST_CLUSTER" >> $log
+echo "source: $main_db_cluster dest: $DEST_CLUSTER" >> $log
rsync -C -a -c --delete --exclude postgresql.conf --exclude postmaster.pid \
--exclude postmaster.opts --exclude pg_log \
--exclude recovery.conf --exclude recovery.done \
--exclude pg_xlog pg_wal \
-$master_db_cluster/ $DEST_CLUSTER/
+$main_db_cluster/ $DEST_CLUSTER/
rm -fr $DEST_CLUSTER/pg_xlog
mkdir $DEST_CLUSTER/pg_xlog
@@ -532,7 +532,7 @@ EOF
echo "export PGHOST=$PGSOCKET_DIR" >> $1/$SCRIPT
cat >> $1/$SCRIPT <<'EOF'
-master_db_cluster=$1
+main_db_cluster=$1
recovery_node_host_name=$2
DEST_CLUSTER=$3
PORT=$4
@@ -541,12 +541,12 @@ log=$DATADIR_BASE/log/recovery.log
$psql -p $PORT -c "SELECT pg_start_backup('Native Replication', true)" postgres
-echo "source: $master_db_cluster dest: $DEST_CLUSTER" > $log
+echo "source: $main_db_cluster dest: $DEST_CLUSTER" > $log
EOF
if [ $PGVERSION -ge 120 ];then
cat >> $1/$SCRIPT <<'EOF'
-cat > $master_db_cluster/myrecovery.conf < $main_db_cluster/myrecovery.conf <> $1/$SCRIPT <<'EOF'
-cat > $master_db_cluster/recovery.conf < $main_db_cluster/recovery.conf <&5 2>&1
- echo "follow_master_command = '$FOLLOW_MASTER_SCRIPT %d %h %p %D %m %M %H %P %r %R'" >> $CONF
+ echo "creating follow primary script"
+ create_follow_primary_script >&5 2>&1
+ echo "follow_primary_command = '$FOLLOW_PRIMARY_SCRIPT %d %h %p %D %m %M %H %P %r %R'" >> $CONF
fi
$PSQL -p $PGPOOL_PORT test <quorumStatus == -1)
quorumStatus = "QUORUM ABSENT";
else if (cluster->quorumStatus == -2)
- quorumStatus = "NO MASTER NODE";
+ quorumStatus = "NO LEADER NODE";
else
quorumStatus = "UNKNOWN";
@@ -763,8 +763,8 @@ output_watchdog_info_result(PCPResultInfo * pcpResInfo, bool verbose)
printf("Quorum state : %s\n", quorumStatus);
printf("Alive Remote Nodes : %d\n", cluster->aliveNodeCount);
printf("VIP up on local node : %s\n", cluster->escalated ? "YES" : "NO");
- printf("Master Node Name : %s\n", cluster->masterNodeName);
- printf("Master Host Name : %s\n\n", cluster->masterHostName);
+ printf("Leader Node Name : %s\n", cluster->leaderNodeName);
+ printf("Leader Host Name : %s\n\n", cluster->leaderHostName);
printf("Watchdog Node Information \n");
for (i = 0; i < cluster->nodeCount; i++)
@@ -786,8 +786,8 @@ output_watchdog_info_result(PCPResultInfo * pcpResInfo, bool verbose)
printf("%d %s %s %s\n\n",
cluster->remoteNodeCount + 1,
cluster->escalated ? "YES" : "NO",
- cluster->masterNodeName,
- cluster->masterHostName);
+ cluster->leaderNodeName,
+ cluster->leaderHostName);
for (i = 0; i < cluster->nodeCount; i++)
{
diff --git a/src/utils/pool_process_reporting.c b/src/utils/pool_process_reporting.c
index 46a3624..2b927d3 100644
--- a/src/utils/pool_process_reporting.c
+++ b/src/utils/pool_process_reporting.c
@@ -581,9 +581,9 @@ get_config(int *nrows)
i++;
/* - Special commands - */
- StrNCpy(status[i].name, "follow_master_command", POOLCONFIG_MAXNAMELEN);
- snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->follow_master_command);
- StrNCpy(status[i].desc, "follow master command", POOLCONFIG_MAXDESCLEN);
+ StrNCpy(status[i].name, "follow_primary_command", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->follow_primary_command);
+ StrNCpy(status[i].desc, "follow primary command", POOLCONFIG_MAXDESCLEN);
i++;
StrNCpy(status[i].name, "database_redirect_preference_list", POOLCONFIG_MAXNAMELEN);
@@ -769,7 +769,7 @@ get_config(int *nrows)
StrNCpy(status[i].name, "wd_de_escalation_command", POOLCONFIG_MAXNAMELEN);
snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->wd_de_escalation_command);
- StrNCpy(status[i].desc, "command executed when master pgpool resigns occurs", POOLCONFIG_MAXDESCLEN);
+ StrNCpy(status[i].desc, "command executed when leader pgpool resigns occurs", POOLCONFIG_MAXDESCLEN);
i++;
StrNCpy(status[i].name, "trusted_servers", POOLCONFIG_MAXNAMELEN);
@@ -779,7 +779,7 @@ get_config(int *nrows)
StrNCpy(status[i].name, "delegate_IP", POOLCONFIG_MAXNAMELEN);
snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->delegate_IP);
- StrNCpy(status[i].desc, "delegate IP address of master pgpool", POOLCONFIG_MAXDESCLEN);
+ StrNCpy(status[i].desc, "delegate IP address of leader pgpool", POOLCONFIG_MAXDESCLEN);
i++;
StrNCpy(status[i].name, "wd_priority", POOLCONFIG_MAXNAMELEN);
@@ -1311,10 +1311,10 @@ get_nodes(int *nrows)
}
else
{
- if (i == REAL_MASTER_NODE_ID)
- snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "master");
+ if (i == REAL_MAIN_NODE_ID)
+ snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "main");
else
- snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "slave");
+ snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "replica");
}
/* status last changed */