[pgpool-hackers: 1807] Re: Feature for recording the watchdog warnings to be displayed by pcp_watchdog_info

Muhammad Usama m.usama at gmail.com
Fri Sep 16 05:24:58 JST 2016


Sorry, forgot to attach the patch.


On Fri, Sep 16, 2016 at 1:23 AM, Muhammad Usama <m.usama at gmail.com> wrote:

> Hi
>
> I was working on fixing "[pgpool-general: 4997] Avoiding downtime
> when pgpool changes require a restart".
> Part of that fix was to downgrade the FATAL error to a WARNING when
> the configurations on the coordinator/master node and a standby node differ.
> But some configuration differences between watchdog nodes can be
> catastrophic, and pgpool-II warning messages are easily overlooked.
> So I was thinking of a small watchdog feature that records all the warning
> messages generated for a watchdog node, so that pcp_watchdog_info on the
> master node can display the warnings recorded for each node.
>
> See the attached WIP test patch.
>
> Below is the output of pcp_watchdog_info with the patch applied. The
> WARNING MESSAGES fields are added by the attached patch.
>
> [usama at localhost bin]$ ./pcp_watchdog_info -h 127.0.0.1 -p 9891  -U
> postgres  -w -v
> Watchdog Cluster Information
> Total Nodes          : 3
> Remote Nodes         : 2
> Quorum state         : QUORUM EXIST
> Alive Remote Nodes   : 2
> VIP up on local node : YES
> Master Node Name     : Linux_localhost.localdomain_9991
> Master Host Name     : localhost
>
> Watchdog Node Information
> Node Name      : Linux_localhost.localdomain_9991
> Host Name      : localhost
> Delegate IP    : Not_Set
> Pgpool port    : 9991
> Watchdog port  : 9001
> Node priority  : 1
> Status         : 4
> Status Name    : MASTER
> WARNING MESSAGES [0]
>
> Node Name      : Linux_localhost.localdomain_9992
> Host Name      : localhost
> Delegate IP    : Not_Set
> Pgpool port    : 9992
> Watchdog port  : 9002
> Node priority  : 1
> Status         : 7
> Status Name    : STANDBY
> WARNING MESSAGES [1]
> 0 value for num_init_children is 1 which is 4 on the master node
>
> Node Name      : Linux_localhost.localdomain_9993
> Host Name      : localhost
> Delegate IP    : Not_Set
> Pgpool port    : 9993
> Watchdog port  : 9003
> Node priority  : 1
> Status         : 7
> Status Name    : STANDBY
> WARNING MESSAGES [3]
> 0 value for num_init_children is 1 which is 4 on the master node
> 1 value for health_check_period is 10 which is 0 on the master node
> 2 value for enable_pool_hba is OFF which is ON on the master node
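>
> To illustrate the bookkeeping (a minimal sketch of the mechanism in the
> patch, with a hypothetical call site for a single parameter): the
> coordinator first clears the stale CONFIG_MISMATCH_WARNING entries for a
> node whenever it re-verifies that node's configuration, then records one
> warning per mismatched parameter. The per-node warning list is later
> serialized into the watchdog-info JSON, which is what pcp_watchdog_info
> prints above.
>
> /* sketch: runs on the coordinator after it receives wdNode's config */
> char msg[MAX_WARNING_MESSAGE_SIZE + 1];
>
> /* drop any config-mismatch warnings left over from the previous check */
> clear_warning_from_node(wdNode, CONFIG_MISMATCH_WARNING);
>
> if (standby_config->num_init_children != pool_config->num_init_children)
> {
>     snprintf(msg, sizeof(msg),
>              "value for num_init_children is %d which is %d on the master node",
>              standby_config->num_init_children, /* the standby's value */
>              pool_config->num_init_children);   /* the local (master) value */
>     /* copies msg into TopMemoryContext and flags CONFIG_MISMATCH_WARNING
>      * in wdNode->warnings */
>     add_warning_to_node(wdNode, CONFIG_MISMATCH_WARNING, msg);
> }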
>
>
> Thoughts and suggestions are welcome.
>
> Thanks
> Best regards
> Muhammad Usama
>
-------------- next part --------------
diff --git a/src/include/pcp/pcp.h b/src/include/pcp/pcp.h
index d05c340..4bb1135 100644
--- a/src/include/pcp/pcp.h
+++ b/src/include/pcp/pcp.h
@@ -46,6 +46,8 @@ typedef struct PCPWDNodeInfo
 	int pgpool_port;						/* pgpool port */
 	char delegate_ip[WD_MAX_HOST_NAMELEN];	/* delegate IP */
 	int	id;
+	int warnings_count;
+	char **warnings;
 }PCPWDNodeInfo;
 
 typedef struct PCPWDClusterInfo
diff --git a/src/include/watchdog/watchdog.h b/src/include/watchdog/watchdog.h
index e7c9770..2c63da2 100644
--- a/src/include/watchdog/watchdog.h
+++ b/src/include/watchdog/watchdog.h
@@ -6,7 +6,7 @@
  * pgpool: a language independent connection pool server for PostgreSQL
  * written by Tatsuo Ishii
  *
- * Copyright (c) 2003-2015	PgPool Global Development Group
+ * Copyright (c) 2003-2016	PgPool Global Development Group
  *
  * Permission to use, copy, modify, and distribute this software and
  * its documentation for any purpose and without fee is hereby
@@ -27,6 +27,8 @@
 #define WATCHDOG_H
 
 #include <sys/time.h>
+#include "parser/pg_list.h"
+
 
 #define WD_TIME_INIT(tv)      ((tv).tv_sec = (tv).tv_usec = 0)
 #define WD_TIME_ISSET(tv)     ((tv).tv_sec || (tv).tv_usec)
@@ -88,6 +90,20 @@ typedef enum {
 
 } WD_EVENTS;
 
+#define MAX_WARNING_MESSAGE_SIZE	255
+typedef enum WatchdogNodeWarningType
+{
+	GENERAL_WARNING = 0x01,
+	CONFIG_MISMATCH_WARNING = 0x02,
+	COMMAND_FAILED_WARNING = 0x04,
+	DATA_NOT_SYNC_WARNING = 0x08,
+}WatchdogNodeWarningType;
+typedef struct WatchdogNodeWarning
+{
+	WatchdogNodeWarningType type;
+	char	message[MAX_WARNING_MESSAGE_SIZE +1];
+}WatchdogNodeWarning;
+
 typedef struct SocketConnection
 {
 	int				sock;			/* socket descriptor */
@@ -114,6 +130,8 @@ typedef struct WatchdogNode
 											 */
 	SocketConnection server_socket;			/* socket connections for this node initiated by remote */
 	SocketConnection client_socket;			/* socket connections for this node initiated by local*/
+	List			*node_warnings;			/* The list of warnings on this node */
+	unsigned int	warnings;
 }WatchdogNode;
 
 extern pid_t initialize_watchdog(void);
diff --git a/src/libs/pcp/pcp.c b/src/libs/pcp/pcp.c
index 753ccb8..962b6f7 100644
--- a/src/libs/pcp/pcp.c
+++ b/src/libs/pcp/pcp.c
@@ -1456,7 +1456,23 @@ process_watchdog_info_response(PCPConnInfo* pcpConn, char* buf, int len)
 				json_value_free(root);
 				goto INVALID_RESPONSE;
 			}
-
+			/* fetch the warnings */
+			json_value* warning_types = json_get_value_for_key(nodeInfoValue,"NodeWarnings");
+			if (warning_types && warning_types->type == json_array && warning_types->u.array.length)
+			{
+				int i;
+				wdNodeInfo->warnings_count = warning_types->u.array.length;
+				wdNodeInfo->warnings = palloc0(sizeof(char*) * wdNodeInfo->warnings_count);
+				for (i = 0; i < wdNodeInfo->warnings_count; i++)
+				{
+					wdNodeInfo->warnings[i] = pstrdup(warning_types->u.array.values[i]->u.string.ptr);
+				}
+			}
+			else
+			{
+				wdNodeInfo->warnings = NULL;
+				wdNodeInfo->warnings_count = 0;
+			}
 		}
 		json_value_free(root);
 
diff --git a/src/tools/pcp/pcp_frontend_client.c b/src/tools/pcp/pcp_frontend_client.c
index 2961aa1..d04f84d 100644
--- a/src/tools/pcp/pcp_frontend_client.c
+++ b/src/tools/pcp/pcp_frontend_client.c
@@ -640,6 +640,7 @@ output_watchdog_info_result(PCPResultInfo* pcpResInfo, bool verbose)
 		printf("Watchdog Node Information \n");
 		for (i=0; i< cluster->nodeCount; i++)
 		{
+			int k;
 			PCPWDNodeInfo* watchdog_info = &cluster->nodeList[i];
 			printf("Node Name      : %s\n",watchdog_info->nodeName);
 			printf("Host Name      : %s\n",watchdog_info->hostName);
@@ -648,7 +649,13 @@ output_watchdog_info_result(PCPResultInfo* pcpResInfo, bool verbose)
 			printf("Watchdog port  : %d\n",watchdog_info->wd_port);
 			printf("Node priority  : %d\n",watchdog_info->wd_priority);
 			printf("Status         : %d\n",watchdog_info->state);
-			printf("Status Name    : %s\n\n",watchdog_info->stateName);
+			printf("Status Name    : %s\n",watchdog_info->stateName);
+			printf("WARNING MESSAGES [%d]\n",watchdog_info->warnings_count);
+			for (k = 0; k < watchdog_info->warnings_count; k++)
+			{
+				printf("%d\t%s\n",k,watchdog_info->warnings[k]);
+			}
+			printf("\n");
 		}
 	}
 	else
diff --git a/src/watchdog/watchdog.c b/src/watchdog/watchdog.c
index 12d8b2f..feb142c 100644
--- a/src/watchdog/watchdog.c
+++ b/src/watchdog/watchdog.c
@@ -416,7 +416,7 @@ static void process_failover_command_sync_requests(WatchdogNode* wdNode, WDPacke
 static WDFailoverCMDResults node_is_asking_for_failover_cmd_end(WatchdogNode* wdNode, WDPacketData* pkt, int failoverCmdType, bool resign);
 static WDFailoverCMDResults node_is_asking_for_failover_cmd_start(WatchdogNode* wdNode, WDPacketData* pkt, int failoverCmdType, bool check);
 static void wd_system_will_go_down(int code, Datum arg);
-static bool verify_pool_configurations(POOL_CONFIG* config);
+static void verify_pool_configurations(WatchdogNode* wdNode, POOL_CONFIG* config);
 
 static bool get_authhash_for_node(WatchdogNode* wdNode, char* authhash);
 static bool verify_authhash_for_node(WatchdogNode* wdNode, char* authhash);
@@ -435,6 +435,9 @@ static void update_interface_status(void);
 static bool any_interface_available(void);
 static WDPacketData* process_data_request(WatchdogNode* wdNode, WDPacketData* pkt);
 
+static void clear_warning_from_node(WatchdogNode* wdNode, WatchdogNodeWarningType type);
+static void add_warning_to_node(WatchdogNode* wdNode, WatchdogNodeWarningType type, char *message);
+
 /* global variables */
 wd_cluster g_cluster;
 struct timeval g_tm_set_time;
@@ -2719,6 +2722,7 @@ static void set_message_data(WDPacketData* pkt, const char* data, int len)
 
 static bool add_nodeinfo_to_json(JsonNode* jNode, WatchdogNode* node)
 {
+	ListCell* lc;
 	jw_start_object(jNode, "WatchdogNode");
 	
 	jw_put_int(jNode, "ID", nodeIfNull_int(private_id,-1));
@@ -2730,9 +2734,32 @@ static bool add_nodeinfo_to_json(JsonNode* jNode, WatchdogNode* node)
 	jw_put_int(jNode, "WdPort", nodeIfNull_int(wd_port,0));
 	jw_put_int(jNode, "PgpoolPort", nodeIfNull_int(pgpool_port,0));
 	jw_put_int(jNode, "Priority", nodeIfNull_int(wd_priority,0));
-
+	/* Now add the warnings reported for this node, if any */
+	if (node->warnings)
+	{
+		jw_start_array(jNode, "NodeWarningTypes");
+		if (node->warnings & GENERAL_WARNING)
+			jw_put_string_value(jNode, "GENERAL WARNING");
+		if (node->warnings & CONFIG_MISMATCH_WARNING)
+			jw_put_string_value(jNode, "CONFIGURATIONS MISMATCH");
+		if (node->warnings & COMMAND_FAILED_WARNING)
+			jw_put_string_value(jNode, "COMMAND FAILED");
+		if (node->warnings & DATA_NOT_SYNC_WARNING)
+			jw_put_string_value(jNode, "DATA SYNC ISSUE");
+		jw_end_element(jNode);
+	}
+	if (node->node_warnings)
+	{
+		jw_start_array(jNode, "NodeWarnings");
+		foreach(lc, node->node_warnings)
+		{
+			WatchdogNodeWarning* warning = lfirst(lc);
+			jw_put_string_value(jNode, warning->message);
+		}
+		jw_end_element(jNode);
+	}
 	jw_end_element(jNode);
-	
+
 	return true;
 }
 
@@ -2969,17 +2996,15 @@ static int standard_packet_processor(WatchdogNode* wdNode, WDPacketData* pkt)
 			
 		case WD_POOL_CONFIG_DATA:
 		{
-			/* we only accept config data from coordinator node */
-			if (wdNode == g_cluster.masterNode && pkt->data)
+			/* only accept config data if I am the coordinator node */
+			if (g_cluster.localNode == g_cluster.masterNode && pkt->data)
 			{
-				POOL_CONFIG* master_config = get_pool_config_from_json(pkt->data, pkt->len);
-				if (master_config)
+				POOL_CONFIG* standby_config = get_pool_config_from_json(pkt->data, pkt->len);
+				if (standby_config)
 				{
-					verify_pool_configurations(master_config);
+					verify_pool_configurations(wdNode, standby_config);
 				}
-				
 			}
-			
 		}
 			break;
 			
@@ -4459,6 +4484,14 @@ static int watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode* wdN
 				}
 					break;
 
+				case WD_JOIN_COORDINATOR_MESSAGE:
+				{
+					reply_with_minimal_message(wdNode, WD_ACCEPT_MESSAGE, pkt);
+					/* Also get the configurations from the standby node */
+					send_message_of_type(wdNode,WD_ASK_FOR_POOL_CONFIG);
+				}
+					break;
+
 				case WD_ADD_NODE_MESSAGE:
 				{
 					standard_packet_processor(wdNode, pkt);
@@ -4739,7 +4772,6 @@ static int watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode* wdNode,
 				if (g_cluster.currentCommand.commandStatus == COMMAND_FINISHED_ALL_REPLIED ||
 					g_cluster.currentCommand.commandStatus == COMMAND_FINISHED_TIMEOUT)
 				{
-					send_message_of_type(g_cluster.masterNode,WD_ASK_FOR_POOL_CONFIG);
 					cluster_in_stable_state();
 
 					ereport(LOG,
@@ -5324,133 +5356,153 @@ static void add_wd_command_for_timer_events(unsigned int expire_secs, bool need_
 	
 }
 
-static bool verify_pool_configurations(POOL_CONFIG* config)
+#define WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config_obj, wdNode, parameter) \
+do { \
+	if (config_obj->parameter != pool_config->parameter) \
+	{ \
+		char warning_message[MAX_WARNING_MESSAGE_SIZE +1];\
+		ereport(WARNING, \
+			(errmsg("configuration value for \"%s\" on node \"%s\" is different", #parameter, wdNode->nodeName), \
+				errdetail("\"%s\" on this node is %d while on \"%s\" is %d", \
+				   #parameter, \
+				   pool_config->parameter, \
+				   wdNode->nodeName, \
+				   config_obj->parameter))); \
+		snprintf(warning_message,MAX_WARNING_MESSAGE_SIZE, "value for %s is %d which is %d on the master node", \
+					#parameter, \
+					config_obj->parameter, \
+					pool_config->parameter); \
+		add_warning_to_node(wdNode,CONFIG_MISMATCH_WARNING,warning_message); \
+	} \
+} while(0)
+#define WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config_obj,wdNode, parameter) \
+do { \
+	if (config_obj->parameter != pool_config->parameter) \
+	{ \
+		char warning_message[MAX_WARNING_MESSAGE_SIZE +1];\
+		ereport(WARNING, \
+			(errmsg("configuration value for \"%s\" on node \"%s\" is different", #parameter, wdNode->nodeName), \
+				errdetail("\"%s\" on this node is %s while on \"%s\" is %s", \
+					#parameter, \
+					pool_config->parameter?"ON":"OFF", \
+					wdNode->nodeName, \
+					config_obj->parameter?"ON":"OFF"))); \
+		snprintf(warning_message,MAX_WARNING_MESSAGE_SIZE, "value for %s is %s which is %s on the master node", \
+					#parameter, \
+					config_obj->parameter?"ON":"OFF", \
+					pool_config->parameter?"ON":"OFF"); \
+		add_warning_to_node(wdNode,CONFIG_MISMATCH_WARNING,warning_message); \
+	} \
+} while(0)
+
+
+static void add_warning_to_node(WatchdogNode* wdNode, WatchdogNodeWarningType type, char *message)
+{
+	/* Node warnings live in TopMemoryContext */
+	WatchdogNodeWarning* warning;
+	MemoryContext oldCxt = MemoryContextSwitchTo(TopMemoryContext);
+	warning = palloc0(sizeof(WatchdogNodeWarning));
+	warning->type = type;
+	if (message)
+		strncpy(warning->message, message, sizeof(warning->message) - 1);
+	wdNode->node_warnings = lappend(wdNode->node_warnings,warning);
+	wdNode->warnings |= type;
+	MemoryContextSwitchTo(oldCxt);
+}
+
+static void clear_warning_from_node(WatchdogNode* wdNode, WatchdogNodeWarningType type)
 {
-	int i;
-	char *key = "";
-	if (config->num_init_children != pool_config->num_init_children)
-	{
-		key = "num_init_children";
-		goto ERROR_EXIT;
-	}
-	if (config->listen_backlog_multiplier != pool_config->listen_backlog_multiplier)
+	List* warnings_to_del = NULL;
+	ListCell* lc;
+	foreach(lc, wdNode->node_warnings)
 	{
-		key = "listen_backlog_multiplier";
-		goto ERROR_EXIT;
+		WatchdogNodeWarning* warning = lfirst(lc);
+		if (warning->type == type)
+			warnings_to_del = lappend(warnings_to_del,warning);
 	}
-	if (config->child_life_time != pool_config->child_life_time)
-	{
-		key = "child_life_time";
-		goto ERROR_EXIT;
-	}
-	if (config->connection_life_time != pool_config->connection_life_time)
-	{
-		key = "connection_life_time";
-		goto ERROR_EXIT;
-	}
-	if (config->child_max_connections != pool_config->child_max_connections)
-	{
-		key = "child_max_connections";
-		goto ERROR_EXIT;
-	}
-	if (config->client_idle_limit != pool_config->client_idle_limit)
-	{
-		key = "client_idle_limit";
-		goto ERROR_EXIT;
-	}
-	if (config->max_pool != pool_config->max_pool)
-	{
-		key = "max_pool";
-		goto ERROR_EXIT;
-	}
-	if (config->replication_mode != pool_config->replication_mode)
+	/* now remove the collected warnings from the node's warning list */
+	foreach(lc, warnings_to_del)
 	{
-		key = "replication_mode";
-		goto ERROR_EXIT;
+		wdNode->node_warnings = list_delete_ptr(wdNode->node_warnings,lfirst(lc));
 	}
-	if (config->enable_pool_hba != pool_config->enable_pool_hba){key = "enable_pool_hba";goto ERROR_EXIT;}
-	if (config->load_balance_mode != pool_config->load_balance_mode){key = "load_balance_mode";goto ERROR_EXIT;}
-	if (config->replication_stop_on_mismatch != pool_config->replication_stop_on_mismatch){key = "replication_stop_on_mismatch";goto ERROR_EXIT;}
-	if (config->failover_if_affected_tuples_mismatch != pool_config->failover_if_affected_tuples_mismatch){key = "failover_if_affected_tuples_mismatch";goto ERROR_EXIT;}
-	if (config->replicate_select != pool_config->replicate_select){key = "replicate_select";goto ERROR_EXIT;}
-	if (config->master_slave_mode != pool_config->master_slave_mode){key = "master_slave_mode";goto ERROR_EXIT;}
-	if (config->connection_cache != pool_config->connection_cache){key = "connection_cache";goto ERROR_EXIT;}
-	if (config->health_check_timeout != pool_config->health_check_timeout){key = "health_check_timeout";goto ERROR_EXIT;}
-	if (config->health_check_period != pool_config->health_check_period){key = "health_check_period";goto ERROR_EXIT;}
-	if (config->health_check_max_retries != pool_config->health_check_max_retries){key = "health_check_max_retries";goto ERROR_EXIT;}
-	
-	if (config->health_check_retry_delay != pool_config->health_check_retry_delay){key = "health_check_retry_delay";goto ERROR_EXIT;}
-	if (config->fail_over_on_backend_error != pool_config->fail_over_on_backend_error){key = "fail_over_on_backend_error";goto ERROR_EXIT;}
-	if (config->recovery_timeout != pool_config->recovery_timeout){key = "recovery_timeout";goto ERROR_EXIT;}
-	if (config->search_primary_node_timeout != pool_config->search_primary_node_timeout){key = "search_primary_node_timeout";goto ERROR_EXIT;}
-	if (config->client_idle_limit_in_recovery != pool_config->client_idle_limit_in_recovery){key = "client_idle_limit_in_recovery";goto ERROR_EXIT;}
-	if (config->insert_lock != pool_config->insert_lock){key = "insert_lock";goto ERROR_EXIT;}
-	
-	if (config->memory_cache_enabled != pool_config->memory_cache_enabled){key = "memory_cache_enabled";goto ERROR_EXIT;}
-	if (config->use_watchdog != pool_config->use_watchdog){key = "use_watchdog";goto ERROR_EXIT;}
-	if (config->clear_memqcache_on_escalation != pool_config->clear_memqcache_on_escalation){key = "clear_memqcache_on_escalation";goto ERROR_EXIT;}
-	
+	list_free_deep(warnings_to_del);
+	wdNode->warnings &= ~type;
+}
+
+static void verify_pool_configurations(WatchdogNode* wdNode, POOL_CONFIG* config)
+{
+	int i;
+	clear_warning_from_node(wdNode,CONFIG_MISMATCH_WARNING);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, num_init_children);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, listen_backlog_multiplier);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, child_life_time);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, connection_life_time);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, child_max_connections);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, client_idle_limit);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, max_pool);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, health_check_timeout);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, health_check_period);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, health_check_max_retries);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, health_check_retry_delay);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, recovery_timeout);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, search_primary_node_timeout);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, client_idle_limit_in_recovery);
+
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, replication_mode);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, enable_pool_hba);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, load_balance_mode);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, replication_stop_on_mismatch);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, failover_if_affected_tuples_mismatch);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, fail_over_on_backend_error);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, replicate_select);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, master_slave_mode);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, connection_cache);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, insert_lock);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, memory_cache_enabled);
+	WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, clear_memqcache_on_escalation);
+
 	if (config->backend_desc->num_backends != pool_config->backend_desc->num_backends)
 	{
-		ereport(FATAL,
-				(return_code(POOL_EXIT_FATAL),
-				 errmsg("configuration error. The configurations on master node is different"),
-				 errdetail("pgpool on master node \"%s\" is configured with %d backends while this node has %d backends configured",
-						   g_cluster.masterNode->nodeName,
-						   config->backend_desc->num_backends,
-						   pool_config->backend_desc->num_backends)));
-		return false;
+		ereport(WARNING,
+				(errmsg("number of configured backends on node \"%s\" is different", wdNode->nodeName),
+				 errdetail("this node has %d backends while on \"%s\" the number of configured backends is %d",
+						   pool_config->backend_desc->num_backends,
+						   wdNode->nodeName,
+						   config->backend_desc->num_backends)));
 	}
-	
 	for (i=0; i < pool_config->backend_desc->num_backends; i++)
 	{
 		if (strncasecmp(pool_config->backend_desc->backend_info[i].backend_hostname, config->backend_desc->backend_info[i].backend_hostname, sizeof(pool_config->backend_desc->backend_info[i].backend_hostname)))
 		{
-			ereport(FATAL,
-					(return_code(POOL_EXIT_FATAL),
-					 errmsg("configuration error. The configurations on master node is different"),
-					 errdetail("pgpool on master node \"%s\" backend[%d] hostname \"%s\" is different from \"%s\" on this node",
-							   g_cluster.masterNode->nodeName,
+			ereport(WARNING,
+					(errmsg("configuration value for backend[%d] \"hostname\" on node \"%s\" is different", i, wdNode->nodeName),
+					 errdetail("\"backend_hostname%d\" on this node is %s while on \"%s\" is %s",
 							   i,
-							   config->backend_desc->backend_info[i].backend_hostname,
-							   pool_config->backend_desc->backend_info[i].backend_hostname)));
-			return false;
+							   pool_config->backend_desc->backend_info[i].backend_hostname,
+							   wdNode->nodeName,
+							   config->backend_desc->backend_info[i].backend_hostname)));
 		}
 		if (config->backend_desc->backend_info[i].backend_port != pool_config->backend_desc->backend_info[i].backend_port)
 		{
-			ereport(FATAL,
-					(return_code(POOL_EXIT_FATAL),
-					 errmsg("configuration error. The configurations on master node is different"),
-					 errdetail("pgpool on master node \"%s\" backend[%d] port \"%d\" is different from \"%d\" on this node",
-							   g_cluster.masterNode->nodeName,
+			ereport(WARNING,
+					(errmsg("configuration value for backend[%d] \"port\" on node \"%s\" is different", i, wdNode->nodeName),
+					 errdetail("\"backend_port%d\" on this node is %d while on \"%s\" is %d",
 							   i,
-							   config->backend_desc->backend_info[i].backend_port,
-							   pool_config->backend_desc->backend_info[i].backend_port)));
-			return false;
+							   pool_config->backend_desc->backend_info[i].backend_port,
+							   wdNode->nodeName,
+							   config->backend_desc->backend_info[i].backend_port)));
 		}
 	}
-	
+
 	if (config->wd_remote_nodes.num_wd != pool_config->wd_remote_nodes.num_wd)
 	{
-		ereport(FATAL,
-				(return_code(POOL_EXIT_FATAL),
-				 errmsg("configuration error. The configurations on master node is different"),
-				 errdetail("pgpool on master node \"%s\" is configured with %d watchdog nodes while this node has %d nodes configured",
-						   g_cluster.masterNode->nodeName,
-						   config->wd_remote_nodes.num_wd,
-						   pool_config->wd_remote_nodes.num_wd)));
-		return false;
+		ereport(WARNING,
+				(errmsg("the number of configured watchdog nodes on node \"%s\" is different", wdNode->nodeName),
+				 errdetail("this node has %d watchdog nodes while \"%s\" is configured with %d watchdog nodes",
+						   pool_config->wd_remote_nodes.num_wd,
+						   wdNode->nodeName,
+						   config->wd_remote_nodes.num_wd)));
 	}
-
-
-	return true;
-ERROR_EXIT:
-	ereport(FATAL,
-			(return_code(POOL_EXIT_FATAL),
-			 errmsg("configuration error. The configurations on master node is different"),
-			 errdetail("value for key \"%s\" differs",key)));
-	
-	return false;
 }
 
 static bool get_authhash_for_node(WatchdogNode* wdNode, char* authhash)

