diff --git a/doc/src/sgml/examples.sgml b/doc/src/sgml/examples.sgml
index ea97551b..d8df89be 100644
--- a/doc/src/sgml/examples.sgml
+++ b/doc/src/sgml/examples.sgml
@@ -1930,7 +1930,7 @@ PING 35.163.178.3 (35.163.178.3) 56(84) bytes of data.
- Disable
+ Disable
to avoid failover when connecting to the backend or
detecting errors on backend side while executing
queries for the same reasons above.
diff --git a/doc/src/sgml/failover.sgml b/doc/src/sgml/failover.sgml
index 51968b47..1eb62f35 100644
--- a/doc/src/sgml/failover.sgml
+++ b/doc/src/sgml/failover.sgml
@@ -113,7 +113,7 @@
However from Pgpool-II 3.6, In
the steaming replication mode, client sessions will not be
- disconnected when a fail-over occurs any more if the
+ disconnected when a failover occurs any more if the
session does not use the failed standby server. If the
primary server goes down, still all sessions will be
disconnected. Health check timeout case will also cause
@@ -304,10 +304,10 @@
-
- fail_over_on_backend_error (boolean)
+
+ failover_on_backend_error (boolean)
- fail_over_on_backend_error configuration parameter
+ failover_on_backend_error configuration parameter
@@ -322,7 +322,7 @@
It is recommended to turn on the backend health checking
(see )
- when fail_over_on_backend_error is set to off.
+ when failover_on_backend_error is set to off.
Note, however, that Pgpool-II still triggers the
failover when it detects the administrative shutdown of
PostgreSQL> backend server.
@@ -332,6 +332,12 @@
This parameter can be changed by reloading the Pgpool-II> configurations.
+
+
+ Prior to Pgpool-II V4.0,
+ this configuration parameter name was fail_over_on_backend_error.
+
+
diff --git a/doc/src/sgml/healthcheck.sgml b/doc/src/sgml/healthcheck.sgml
index d75d3514..6db7b0bd 100644
--- a/doc/src/sgml/healthcheck.sgml
+++ b/doc/src/sgml/healthcheck.sgml
@@ -174,7 +174,7 @@
- It is advised that must be disabled,
+ It is advised that must be disabled,
if you want to enable health_check_max_retries>.
diff --git a/doc/src/sgml/release-3.6.sgml b/doc/src/sgml/release-3.6.sgml
index f2f381f0..edba4c4a 100644
--- a/doc/src/sgml/release-3.6.sgml
+++ b/doc/src/sgml/release-3.6.sgml
@@ -2174,9 +2174,9 @@
- Improve the behavior of fail-over. In the steaming
+ Improve the behavior of failover. In the streaming
replication mode, client sessions will not be disconnected
- when a fail-over occurs any more if the session does not use
+ when a failover occurs any more if the session does not use
the failed standby server. If the primary server goes down,
still all sessions will be disconnected. Also it is possible
to connect to Pgpool-II even if
@@ -2218,7 +2218,7 @@
In some cases pg_terminate_backend() now does not trigger a
- fail-over.
+ failover.
@@ -2248,11 +2248,11 @@
2016-05-05 [0d66032] Allow to access to pgpool while doing health checking
-->
- Improve the behavior of fail-over. (Tatsuo Ishii)
+ Improve the behavior of failover. (Tatsuo Ishii)
In the steaming replication mode, client sessions will not
- be disconnected when a fail-over occurs any more if the
+ be disconnected when a failover occurs any more if the
session does not use the failed standby server. If the
primary server goes down, still all sessions will be
disconnected. Health check timeout case will also cause the
@@ -2264,9 +2264,9 @@
For user's convenience, "show pool_nodes" command shows the
session local load balance node info since this is important
- for users in case of fail-over. If the load balance node is
+ for users in case of failover. If the load balance node is
not the failed node, the session will not be affected by
- fail-over.
+ failover.
@@ -2277,12 +2277,12 @@
health checking retries. Before any attempt to connect to
Pgpool-II fails if it is doing a
health check against failed node even if
- is off
+ is off
because Pgpool-II child first
tries to connect to all backend including the failed one and
exits if it fails to connect to a backend (of course it
fails). This is a temporary situation and will be resolved
- once pgpool executes fail-over. However if the health check
+ once pgpool executes failover. However if the health check
is retrying, the temporary situation keeps longer depending
on the setting
of and
@@ -2409,7 +2409,7 @@
2016-08-26 [f284be4] Handling of pg_terminate_backend for simple query protocol
-->
- In some cases pg_terminate_backend() now does not trigger a fail-over. (Muhammad Usama)
+ In some cases pg_terminate_backend() now does not trigger a failover. (Muhammad Usama)
Because PostgreSQL returns exactly the same error code as postmaster
@@ -2976,7 +2976,7 @@
-->
Fix that pool_read() does not emit error messages when read(2) returns -1 if
- is off. (Tatsuo Ishii)
+ is off. (Tatsuo Ishii)
diff --git a/src/config/pool_config_variables.c b/src/config/pool_config_variables.c
index 4559c61c..99be5238 100644
--- a/src/config/pool_config_variables.c
+++ b/src/config/pool_config_variables.c
@@ -104,6 +104,7 @@ static bool WdSlotEmptyCheckFunc(int index);
static bool BackendSlotEmptyCheckFunc(int index);
/*variable custom assign functions */
+static bool FailOverOnBackendErrorAssignMessage(ConfigContext scontext, bool newval, int elevel);
static bool BackendPortAssignFunc(ConfigContext context, int newval, int index, int elevel);
static bool BackendHostAssignFunc(ConfigContext context, char *newval, int index, int elevel);
static bool BackendDataDirAssignFunc(ConfigContext context, char *newval, int index, int elevel);
@@ -369,10 +370,20 @@ static struct config_bool ConfigureNamesBool[] =
{
{"fail_over_on_backend_error", CFGCXT_RELOAD, FAILOVER_CONFIG,
+ "Old config parameter for failover_on_backend_error.",
+ CONFIG_VAR_TYPE_BOOL, false, VAR_HIDDEN_IN_SHOW_ALL
+ },
+ NULL,
+ true,
+ FailOverOnBackendErrorAssignMessage, NULL, NULL
+ },
+
+ {
+ {"failover_on_backend_error", CFGCXT_RELOAD, FAILOVER_CONFIG,
"Triggers fail over when reading/writing to backend socket fails.",
CONFIG_VAR_TYPE_BOOL, false, 0
},
- &g_pool_config.fail_over_on_backend_error,
+ &g_pool_config.failover_on_backend_error,
true,
NULL, NULL, NULL
},
@@ -4111,6 +4122,20 @@ HBDestinationPortAssignFunc(ConfigContext context, int newval, int index, int el
return true;
}
+/*
+ * Throws a warning if someone uses the removed fail_over_on_backend_error
+ * configuration parameter
+ */
+static bool
+FailOverOnBackendErrorAssignMessage(ConfigContext scontext, bool newval, int elevel)
+{
+ if (scontext != CFGCXT_BOOT)
+ ereport(WARNING,
+ (errmsg("fail_over_on_backend_error is changed to failover_on_backend_error"),
+ errdetail("setting fail_over_on_backend_error has no effect"),
+ errhint("use failover_on_backend_error instead")));
+ return true;
+}
/*
* Check DB node spec. node spec should be either "primary", "standby" or
* numeric DB node id.
diff --git a/src/include/pool_config.h b/src/include/pool_config.h
index 32b9b1c8..844e998d 100644
--- a/src/include/pool_config.h
+++ b/src/include/pool_config.h
@@ -262,7 +262,7 @@ typedef struct
* ended */
char *failback_command; /* execute command when failback happens */
- bool fail_over_on_backend_error; /* If true, trigger fail over when
+ bool failover_on_backend_error; /* If true, trigger fail over when
* writing to the backend
* communication socket fails.
* This is the same behavior of
diff --git a/src/protocol/pool_connection_pool.c b/src/protocol/pool_connection_pool.c
index d5f05a5a..6370cb5f 100644
--- a/src/protocol/pool_connection_pool.c
+++ b/src/protocol/pool_connection_pool.c
@@ -890,10 +890,10 @@ static POOL_CONNECTION_POOL * new_connection(POOL_CONNECTION_POOL * p)
if (create_cp(s, i) == NULL)
{
/*
- * If fail_over_on_backend_error is true, do failover. Otherwise,
+ * If failover_on_backend_error is true, do failover. Otherwise,
* just exit this session or skip next health node.
*/
- if (pool_config->fail_over_on_backend_error)
+ if (pool_config->failover_on_backend_error)
{
notice_backend_error(i, REQ_DETAIL_SWITCHOVER);
ereport(FATAL,
@@ -910,7 +910,7 @@ static POOL_CONNECTION_POOL * new_connection(POOL_CONNECTION_POOL * p)
{
ereport(LOG,
(errmsg("failed to create a backend %d connection", i),
- errdetail("skip this backend because because fail_over_on_backend_error is off and we are in streaming replication mode and node is standby node")));
+ errdetail("skip this backend because failover_on_backend_error is off and we are in streaming replication mode and node is standby node")));
/* set down status to local status area */
*(my_backend_status[i]) = CON_DOWN;
@@ -938,7 +938,7 @@ static POOL_CONNECTION_POOL * new_connection(POOL_CONNECTION_POOL * p)
{
ereport(FATAL,
(errmsg("failed to create a backend %d connection", i),
- errdetail("not executing failover because fail_over_on_backend_error is off")));
+ errdetail("not executing failover because failover_on_backend_error is off")));
}
}
child_exit(POOL_EXIT_AND_RESTART);
diff --git a/src/sample/pgpool.conf.sample b/src/sample/pgpool.conf.sample
index b0d4b3ca..c5304e89 100644
--- a/src/sample/pgpool.conf.sample
+++ b/src/sample/pgpool.conf.sample
@@ -466,7 +466,7 @@ failback_command = ''
# %R = new master database cluster path
# %% = '%' character
-fail_over_on_backend_error = on
+failover_on_backend_error = on
# Initiates failover when reading/writing to the
# backend communication socket fails
# If set to off, pgpool will report an
diff --git a/src/sample/pgpool.conf.sample-logical b/src/sample/pgpool.conf.sample-logical
index 809ed6f7..b3f1aeb6 100644
--- a/src/sample/pgpool.conf.sample-logical
+++ b/src/sample/pgpool.conf.sample-logical
@@ -451,7 +451,7 @@ failback_command = ''
# %R = new master database cluster path
# %% = '%' character
-fail_over_on_backend_error = on
+failover_on_backend_error = on
# Initiates failover when reading/writing to the
# backend communication socket fails
# If set to off, pgpool will report an
diff --git a/src/sample/pgpool.conf.sample-master-slave b/src/sample/pgpool.conf.sample-master-slave
index 56fedcae..9d330c27 100644
--- a/src/sample/pgpool.conf.sample-master-slave
+++ b/src/sample/pgpool.conf.sample-master-slave
@@ -465,7 +465,7 @@ failback_command = ''
# %R = new master database cluster path
# %% = '%' character
-fail_over_on_backend_error = on
+failover_on_backend_error = on
# Initiates failover when reading/writing to the
# backend communication socket fails
# If set to off, pgpool will report an
diff --git a/src/sample/pgpool.conf.sample-replication b/src/sample/pgpool.conf.sample-replication
index fa6d5f30..827cea31 100644
--- a/src/sample/pgpool.conf.sample-replication
+++ b/src/sample/pgpool.conf.sample-replication
@@ -463,7 +463,7 @@ failback_command = ''
# %R = new master database cluster path
# %% = '%' character
-fail_over_on_backend_error = on
+failover_on_backend_error = on
# Initiates failover when reading/writing to the
# backend communication socket fails
# If set to off, pgpool will report an
diff --git a/src/sample/pgpool.conf.sample-stream b/src/sample/pgpool.conf.sample-stream
index 9138862a..84b791d3 100644
--- a/src/sample/pgpool.conf.sample-stream
+++ b/src/sample/pgpool.conf.sample-stream
@@ -464,7 +464,7 @@ failback_command = ''
# %R = new master database cluster path
# %% = '%' character
-fail_over_on_backend_error = on
+failover_on_backend_error = on
# Initiates failover when reading/writing to the
# backend communication socket fails
# If set to off, pgpool will report an
diff --git a/src/utils/pool_process_reporting.c b/src/utils/pool_process_reporting.c
index b5d22783..8c8d4dad 100644
--- a/src/utils/pool_process_reporting.c
+++ b/src/utils/pool_process_reporting.c
@@ -605,9 +605,9 @@ get_config(int *nrows)
StrNCpy(status[i].desc, "failback command", POOLCONFIG_MAXDESCLEN);
i++;
- StrNCpy(status[i].name, "fail_over_on_backend_error", POOLCONFIG_MAXNAMELEN);
- snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->fail_over_on_backend_error);
- StrNCpy(status[i].desc, "fail over on backend error", POOLCONFIG_MAXDESCLEN);
+ StrNCpy(status[i].name, "failover_on_backend_error", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->failover_on_backend_error);
+ StrNCpy(status[i].desc, "failover on backend error", POOLCONFIG_MAXDESCLEN);
i++;
StrNCpy(status[i].name, "detach_false_primary", POOLCONFIG_MAXNAMELEN);
diff --git a/src/utils/pool_stream.c b/src/utils/pool_stream.c
index 63149d49..cfec0338 100644
--- a/src/utils/pool_stream.c
+++ b/src/utils/pool_stream.c
@@ -220,10 +220,10 @@ pool_read(POOL_CONNECTION * cp, void *buf, int len)
}
/*
- * if fail_over_on_backend_error is true, then trigger
+ * if failover_on_backend_error is true, then trigger
* failover
*/
- if (pool_config->fail_over_on_backend_error)
+ if (pool_config->failover_on_backend_error)
{
notice_backend_error(cp->db_node_id, REQ_DETAIL_SWITCHOVER);
@@ -372,10 +372,10 @@ pool_read2(POOL_CONNECTION * cp, int len)
}
/*
- * if fail_over_on_backend_error is true, then trigger
+ * if failover_on_backend_error is true, then trigger
* failover
*/
- if (pool_config->fail_over_on_backend_error)
+ if (pool_config->failover_on_backend_error)
{
notice_backend_error(cp->db_node_id, REQ_DETAIL_SWITCHOVER);
child_exit(POOL_EXIT_AND_RESTART);
@@ -388,7 +388,7 @@ pool_read2(POOL_CONNECTION * cp, int len)
{
ereport(ERROR,
(errmsg("unable to read data from DB node %d", cp->db_node_id),
- errdetail("do not failover because fail_over_on_backend_error is off")));
+ errdetail("do not failover because failover_on_backend_error is off")));
}
}
else
@@ -728,8 +728,8 @@ pool_flush(POOL_CONNECTION * cp)
errdetail("pg_terminate_backend was called on the backend")));
}
- /* if fail_over_on_backend_error is true, then trigger failover */
- if (pool_config->fail_over_on_backend_error)
+ /* if failover_on_backend_error is true, then trigger failover */
+ if (pool_config->failover_on_backend_error)
{
notice_backend_error(cp->db_node_id, REQ_DETAIL_SWITCHOVER);
ereport(LOG,
@@ -743,7 +743,7 @@ pool_flush(POOL_CONNECTION * cp)
{
ereport(ERROR,
(errmsg("unable to flush data to backend"),
- errdetail("do not failover because fail_over_on_backend_error is off")));
+ errdetail("do not failover because failover_on_backend_error is off")));
}
}
else
@@ -785,7 +785,7 @@ pool_flush_noerror(POOL_CONNECTION * cp)
}
- /* if fail_over_on_backend_erro is true, then trigger failover */
+ /* if failover_on_backend_error is true, then trigger failover */
- if (pool_config->fail_over_on_backend_error)
+ if (pool_config->failover_on_backend_error)
{
notice_backend_error(cp->db_node_id, REQ_DETAIL_SWITCHOVER);
child_exit(POOL_EXIT_AND_RESTART);
@@ -798,7 +798,7 @@ pool_flush_noerror(POOL_CONNECTION * cp)
{
ereport(LOG,
(errmsg("unable to flush data to backend"),
- errdetail("do not failover because fail_over_on_backend_error is off")));
+ errdetail("do not failover because failover_on_backend_error is off")));
return -1;
}
}
diff --git a/src/watchdog/watchdog.c b/src/watchdog/watchdog.c
index 16f940b7..724b4492 100644
--- a/src/watchdog/watchdog.c
+++ b/src/watchdog/watchdog.c
@@ -6666,7 +6666,7 @@ verify_pool_configurations(WatchdogNode * wdNode, POOL_CONFIG * config)
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, replication_stop_on_mismatch);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, allow_clear_text_frontend_auth);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, failover_if_affected_tuples_mismatch);
- WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, fail_over_on_backend_error);
+ WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, failover_on_backend_error);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, replicate_select);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, master_slave_mode);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, connection_cache);
diff --git a/src/watchdog/wd_json_data.c b/src/watchdog/wd_json_data.c
index f14c52e7..734b12ff 100644
--- a/src/watchdog/wd_json_data.c
+++ b/src/watchdog/wd_json_data.c
@@ -88,7 +88,7 @@ get_pool_config_from_json(char *json_data, int data_len)
goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "health_check_retry_delay", &config->health_check_retry_delay))
goto ERROR_EXIT;
- if (json_get_bool_value_for_key(root, "fail_over_on_backend_error", &config->fail_over_on_backend_error))
+ if (json_get_bool_value_for_key(root, "failover_on_backend_error", &config->failover_on_backend_error))
goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "recovery_timeout", &config->recovery_timeout))
goto ERROR_EXIT;
@@ -198,7 +198,7 @@ get_pool_config_json(void)
jw_put_int(jNode, "health_check_period", pool_config->health_check_period);
jw_put_int(jNode, "health_check_max_retries", pool_config->health_check_max_retries);
jw_put_int(jNode, "health_check_retry_delay", pool_config->health_check_retry_delay);
- jw_put_bool(jNode, "fail_over_on_backend_error", pool_config->fail_over_on_backend_error);
+ jw_put_bool(jNode, "failover_on_backend_error", pool_config->failover_on_backend_error);
jw_put_int(jNode, "recovery_timeout", pool_config->recovery_timeout);
jw_put_int(jNode, "search_primary_node_timeout", pool_config->search_primary_node_timeout);
jw_put_int(jNode, "client_idle_limit_in_recovery", pool_config->client_idle_limit_in_recovery);