#!/bin/ksh93
# ALTRAN_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# Copyright (C) Altran ACT S.A.S. 2017,2018,2020,2021. All rights reserved.
#
# ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# 61haes_r721 src/43haes/lib/ksh93/hacmp/KLIB_HACMP_add_cluster.sh 1.32
#
# Licensed Materials - Property of IBM
#
# Restricted Materials of IBM
#
# COPYRIGHT International Business Machines Corp. 2006,2016
# All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
# IBM_PROLOG_END_TAG
# @(#) 7d50537 43haes/lib/ksh93/hacmp/KLIB_HACMP_add_cluster.sh, 726, 2147A_aha726, Aug 13 2021 10:53 AM

#================================================
# The following, commented line enforces coding
# standards when this file is edited via vim.
#================================================
# vim:tabstop=4:shiftwidth=4:expandtab:smarttab
#================================================

#======================================================================
# If signature this function changes (new/removed/changed parameters),
# make sure that the modify cluster signature also gets inspected for
# any matching changes that might be needed (class_processors,
# resource_modify, KLIB_HACMP_modify_cluster). Since this function
# can invoke the modify function, they must usually be changed as a
# unit.
#======================================================================

#======================================================================
# Start of POD-formatted documentation. Viewing suggestions:
#     perldoc
#     pod2text -c
#     pod2text -c --code
#     pod2html
#======================================================================
# NOTE(review): the angle-bracket placeholders in the POD below (e.g.
# "<cluster_label>", "<node>") were stripped by a previous text-handling
# step and have been reconstructed here -- verify against the published
# clmgr man page before release.
function devDoc {
: <<'=cut' >/dev/null 2>&1

=head1 NAME

KLIB_HACMP_add_cluster

=head1 SYNOPSIS

clmgr add cluster \
    [ <cluster_label> ] \
    [ NODES=<node>[,<node#2>,...] ] \
    [ TYPE={NSC|SC} ] \
    [ HEARTBEAT_TYPE={unicast|multicast} ] \
    [ CLUSTER_IP=<multicast_address> ] \
    [ REPOSITORIES=<disk>[,<disk#2>,...] ] \
    [ FC_SYNC_INTERVAL=## ] \
    [ RG_SETTLING_TIME=## ] \
    [ MAX_EVENT_TIME=### ] \
    [ MAX_RG_PROCESSING_TIME=### ] \
    [ DAILY_VERIFICATION={Enabled|Disabled} ] \
    [ VERIFICATION_NODE={Default|<node>} ] \
    [ VERIFICATION_HOUR=<00..23> ] \
    [ VERIFICATION_DEBUGGING={Enabled|Disabled} ] \
    [ HEARTBEAT_FREQUENCY=### ] \
    [ GRACE_PERIOD=### ] \
    [ SITE_POLICY_FAILURE_ACTION={fallover|notify} ] \
    [ SITE_POLICY_NOTIFY_METHOD="<method>" ] \
    [ SITE_HEARTBEAT_CYCLE=### ] \
    [ SITE_GRACE_PERIOD=### ] \
    [ TEMP_HOSTNAME={disallow|allow} ] \
    [ LPM_POLICY={manage|unmanage} ] \
    [ HEARTBEAT_FREQUENCY_DURING_LPM=### ] \
    [ NETWORK_FAILURE_DETECTION_TIME=<0,5..590> ] \
    [ CAA_AUTO_START_DR={Enabled|Disabled} ] \
    [ CAA_REPOS_MODE={assert|event} ] \
    [ CAA_CONFIG_TIMEOUT=<0..2147483647> ] \
    [ LVM_PREFERRED_READ=<mode> ] \
    [ CRIT_DAEMON_RESTART_GRACE_PERIOD=<0..240> ] \
    [ SKIP_EVENT_PROCESSING_MANAGE_MODE={true|false} ]

clmgr add cluster \
    [ <cluster_label> ] \
    [ NODES=<node>[,<node#2>,...] ] \
    TYPE="LC" \
    [ HEARTBEAT_TYPE={unicast|multicast} ] \
    [ FC_SYNC_INTERVAL=## ] \
    [ RG_SETTLING_TIME=## ] \
    [ MAX_EVENT_TIME=### ] \
    [ MAX_RG_PROCESSING_TIME=### ] \
    [ DAILY_VERIFICATION={Enabled|Disabled} ] \
    [ VERIFICATION_NODE={Default|<node>} ] \
    [ VERIFICATION_HOUR=<00..23> ] \
    [ VERIFICATION_DEBUGGING={Enabled|Disabled} ] \
    [ HEARTBEAT_FREQUENCY=### ] \
    [ GRACE_PERIOD=### ] \
    [ SITE_POLICY_FAILURE_ACTION={fallover|notify} ] \
    [ SITE_POLICY_NOTIFY_METHOD="<method>" ] \
    [ SITE_HEARTBEAT_CYCLE=### ] \
    [ SITE_GRACE_PERIOD=### ] \
    [ TEMP_HOSTNAME={disallow|allow} ] \
    [ LPM_POLICY={manage|unmanage} ] \
    [ HEARTBEAT_FREQUENCY_DURING_LPM=### ] \
    [ NETWORK_FAILURE_DETECTION_TIME=<0,5..590> ] \
    [ CAA_AUTO_START_DR={Enabled|Disabled} ] \
    [ CAA_REPOS_MODE={assert|event} ] \
    [ CAA_CONFIG_TIMEOUT=<0..2147483647> ] \
    [ LVM_PREFERRED_READ=<mode> ] \
    [ CRIT_DAEMON_RESTART_GRACE_PERIOD=<0..240> ] \
    [ SKIP_EVENT_PROCESSING_MANAGE_MODE={true|false} ]

NOTE: "NSC": "Non-Site Cluster"; no sites will be defined.
      "SC":  "Stretched Cluster"; simplified infrastructure, ideal for
             limited distance data replication. Sites *must* be defined.
      "LC":  "Linked Cluster"; full-featured infrastructure, ideal for
             long distance data replication. Sites *must* be defined.

NOTE: the cluster type cannot be modified once the cluster is fully
      defined and synchronized if sites are already in use.

NOTE: "CLUSTER_IP" may only be used with a cluster type of "NSC" or "SC".

NOTE: "REPOSITORIES" may only be used with a cluster type of "NSC" or
      "SC". No more than MAX_NB_OF_BACKUP_REPOSITORIES backup repository
      disks may be defined per "NSC" or "SC" cluster.

NOTE: the "RG_SETTLING_TIME" attribute only affects resource groups with
      a startup policy of "Online On First Available Node".

NOTE: the alias for "cluster" is "cl".

=head1 DESCRIPTION

Attempts to create a PowerHA SystemMirror cluster that conforms to
the provided specifications.

=head1 ARGUMENTS

    1. properties    [REQUIRED] [hash ref]
       An associative array within which data about the created
       object can be returned to the caller.

    2. name          [REQUIRED] [string]
       The label to apply to the new cluster.

    *  Any arguments that would normally apply to the "modify cluster"
       operation can also be appended here, after the core arguments.
       Any "extra" arguments are passed as-is to the modify command.

=head1 RETURN

    0: no errors were detected; the operation appears to have been successful
    1: a general error has occurred
    2: a specified resource does not exist, or could not be found
    3: some required input was missing
    4: some detected input was incorrect in some way
    5: a required dependency does not exist
    6: a specified search failed to match any data

=cut
} # End of POD-formatted documentation.

###############################################################################
#
# Name:         diagnose
#
# Description:  attempts to identify any obvious configuration errors in
#               the local cluster node.
#
# Inputs:       NODES    the nodes in this cluster
#
# Outputs:      Displays helpful hints and tips about problems that are
#               discovered.
#
# Returns:      Number of problems found.
# ############################################################################### function diagnose { . $HALIBROOT/log_entry "$0()" "$CL" : INPUTS: $* NODES=$1 [[ $CLMGR_LOGGING == 'med' ]] && set +x # Only trace param values #=================================== : Declare and initialize variables #=================================== typeset -i all_problems=0 problems=0 typeset node= IPADDR= HOSTNAME= ALIASES= for node in $NODES; do get_active_ifs $node | IFS=@ read IPADDR HOSTNAME ALIASES if (( $? != 0 )); then (( all_problems++ )) continue # No help available for this one! fi #========================================================== : Check for the IP address or hostname in the rhosts file #========================================================== rhosts_entry=-1 /usr/bin/grep -qw $IPADDR /etc/cluster/rhosts rhosts_entry=$? if (( rhosts_entry != 0)); then /usr/bin/grep -qw $HOSTNAME /etc/cluster/rhosts rhosts_entry=$? if (( rhosts_entry != 0)); then typeset ERRMSG=" Warning: a valid entry is missing from the %1\$s file. A boot IP address or fully qualified host name for each node must be entered in that file on all nodes in the cluster. Please consider adding either \"%2\$s\" or \"%3\$s\" to all the %1\$s files in your cluster, then restart clcomd on each node. For example: echo \"%3\$s\" >>%1\$s stopsrc -s clcomd; sleep 2; startsrc -s clcomd " cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 235 "$ERRMSG" /etc/cluster/rhosts "$HOSTNAME" "$IPADDR" 1>&2 (( problems++ )) fi fi #=================== : Check /etc/hosts #=================== ETCHOSTS= while read LINE; do LINE=${LINE%%#*} [[ $LINE == *([[:space:]]) ]] && continue LINE=${LINE##+([[:space:]])} ETCHOSTS="$ETCHOSTS $LINE" done \". 
For example: 10.4.122.215 yourhost.customer.domain.com yourhost Please consider adding a line similar to the following to the %1\$s file on all your nodes, then restart clcomd: echo \"%2\$s\" >>%1\$s stopsrc -s clcomd; sleep 2; startsrc -s clcomd " cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 236 "$ERRMSG" /etc/hosts "$IPADDR $HOSTNAME $ALIASES" 1>&2 (( problems++ )) fi #====================================== : Check clcomd outbound communication #====================================== if (( problems == 0 )); then /usr/sbin/clrsh $node /bin/hostname 2>/dev/null 1>&2 if (( $? != 0 )); then typeset ERRMSG=" Warning: cannot communicate to node \"%1\$s\". This indicates a problem with the clcomd subsystem. Make sure it is running on \"%1\$s\". Check the system configuration files that affect it, /etc/cluster/rhosts and /etc/hosts, for complete entries for all nodes in the cluster. Also consider restarting the service using: stopsrc -s clcomd; sleep 2; startsrc -s clcomd " cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 237 "$ERRMSG" "$node" 1>&2 fi fi (( all_problems += problems )) done if (( all_problems == 0 )); then typeset ERRMSG=" No problems were detected on this node. Of course, that does not mean that no problems actually exist! Only that none were found for you automatically! Here are a few tips that might help you troubleshoot this problem. * Compare the local /etc/hosts and /etc/cluster/rhosts files with their counterparts on the other nodes in this prospective cluster. The rhosts file requires an IP address or fully-qualified host name for each node in the cluster. The hosts file requires the IP address, fully-qualified host name, and short host name for each node in the cluster. 
* Make sure only one clcomd process is running, and that it is the /usr/sbin/clcomd: ps -ef | grep clcomd | grep -v grep * Perform a full restart (not a refresh) of clcomd using a command like: stopsrc -s clcomd; sleep 2; startsrc -s clcomd " dspmsg -s $CLMGR_SET $CLMGR_MSGS 238 "$ERRMSG" 1>&2 else print -u2 fi log_return_msg "$all_problems" "$0()" "$LINENO" return $? } # End of "diagnose()" #============================================================================ # # Name: KLIB_HACMP_add_cluster # # Description: This is the main, FPATH function that is invoked by clmgr # to define a cluster for the specified cluster nodes. The # cluster will remain a mere definition on the node where # clmgr was invoked until the first synchronization operation. # The synchronization will create the CAA cluster, and push # the cluster definition out to all the defined nodes, thus # completing the cluster creation. # # Inputs: See the "devDoc()" function, above. # # Outputs: The properties hash is populated. The only other outputs are # any error messages that might be needed. # # Returns: Zero if no errors are detected. Otherwise, an appropriate # non-zero value is returned. Refer to the "RETURN" section # of the "devDoc()" function, above, for the standard return # code values/meanings for clmgr. # # NOTE: if a non-zero value is returned, then an effort is # made to remove any partially created cluster # definition. # #============================================================================ function KLIB_HACMP_add_cluster { . 
$HALIBROOT/log_entry "$0()" "$CL"
    : version= @(#) 7d50537 43haes/lib/ksh93/hacmp/KLIB_HACMP_add_cluster.sh, 726, 2147A_aha726, Aug 13 2021 10:53 AM
    : INPUTS: $*
    typeset -n properties=$1    # nameref: caller's associative array for results
    typeset name=${2//\"/}      # requested cluster label (quotes stripped)
    typeset nodes=${3//,/ }     # node list: commas -> spaces
    nodes=${nodes//\"/}
    [[ $CLMGR_LOGGING == 'med' ]] && set +x  # Only trace param values

    #===================================
    : Declare and initialize variables
    #===================================
    typeset -i rc=$RC_UNKNOWN
    typeset existingName=$(CL=$LINENO KLIB_HACMP_get_cluster_name 2>>$CLMGR_TMPLOG)

    #=========================================
    : If a cluster is already defined, abort
    #=========================================
    if [[ -n $existingName ]]; then
        dspmsg -s $CLMGR_SET $CLMGR_MSGS 229 "\nERROR: the specified object already exists: \"%1\$s\"\n\n" "$existingName" 1>&2
        rc=$RC_ERROR
    fi

    #======================================================
    : Verify we have at least one node specified. If not,
    : just add this system. This could happen if called
    : directly from the CLI instead of SMIT.
    #======================================================
    if (( rc == $RC_UNKNOWN )); then
        localnode=
        if [[ -z $nodes ]]; then
            localnode=$(get_local_node_label)
            nodes=$localnode
            dspmsg -s $CLMGR_SET $CLMGR_MSGS 239 "Warning: since no nodes were specified for this cluster, a one-node cluster will be created with this system: \"%1\$s\"\n" "$nodes"
        fi
    fi

    #=====================================================================
    : If we do not have a cluster name, give it the name node#1_cluster,
    : where "node#1" is the first node in the list.
    #=====================================================================
    if [[ -z $name ]]; then
        typeset node1=${nodes%% *}
        # FIX: was "{$node1%%.*}_cluster" -- the "$" was outside the brace,
        # producing a literal "{<node>%%.*}_cluster" instead of the short
        # host name with a "_cluster" suffix.
        name="${node1%%.*}_cluster"
        dspmsg -s $CLMGR_SET $CLMGR_MSGS 223 "Warning: since no label was provided, a default label will be\n provided automatically: \"%1\$s\"\n" "$name"
    elif [[ -n "${name//[a-zA-Z0-9_\-]/}" ]]; then
        # The name contains characters outside the allowed set
        dspmsg -s $CLMGR_SET $CLMGR_MSGS 188 "\nERROR: one or more invalid characters were detected in \"%1\$s\" (\"%2\$s\").\n\nValid characters include letters, numbers, underscores, and dashes only.\n\n" "$name" "${name//[a-zA-Z0-9_\-]/}" 1>&2
        rc=$RC_INCORRECT_INPUT
    fi

    #================================================================
    : Correct the default cluster type, then use it to validate the
    : repository and cluster IP inputs, if they have been provided.
    #================================================================
    typeset repositories=${_ENV_ARGS[REPOSITORIES]}
    repositories=${repositories//,/ }
    typeset -u CTYPE=${_ENV_ARGS[TYPE]}   # upper-cased copy of TYPE
    if [[ -n $CTYPE ]]; then
        case $CTYPE in
            L*) CLUSTER_TYPE="LC" ;;   # Linked Cluster
            S*) CLUSTER_TYPE="SC" ;;   # Stretched Cluster
             *) CLUSTER_TYPE="NSC" ;;  # Non-Site Cluster (default)
        esac
    fi

    # A multicast cluster IP is meaningless with unicast heartbeating
    typeset -u HBT=${_ENV_ARGS[HEARTBEAT_TYPE]}
    if [[ $HBT == U* ]]; then
        if [[ -n ${_ENV_ARGS[CLUSTER_IP]} ]]; then
            cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 1010 '\nERROR: you have indicated that you want to create a cluster that uses unicast\ncommunications, but you have also provided a multicast IP address (e.g.\n"%1$s"). These settings are not compatible with each other. A multicast\naddress is only valid when multicast communications are in use.\n' \
                "${_ENV_ARGS[CLUSTER_IP]}" 1>&2
            rc=$RC_INCORRECT_INPUT
        fi
    fi

    if [[ $CLUSTER_TYPE != "LC" ]]; then
        # NSC/SC: a cluster-wide repository is expected (warn if absent)
        if [[ $repositories == *([[:space:]]) ]]; then
            cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 232 "\nWarning: to complete this configuration, a repository disk must be defined.\n"
        fi
    else
        # LC: cluster-wide repositories and multicast addresses are invalid
        if [[ $repositories != *([[:space:]]) ]]; then
            cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 1011 '\nERROR: you have indicated that you want to create a linked cluster\n(e.g. TYPE="%1$s"), but you have also specified a cluster-wide\nrepository (e.g. REPOSITORIES="%2$s"). These settings\nare not compatible with each other. A cluster-wide repository\nis only valid in a stretched or standard (no sites) cluster.\n' \
                "${_ENV_ARGS[TYPE]}" "${_ENV_ARGS[REPOSITORIES]%%,*}" 1>&2
            rc=$RC_INCORRECT_INPUT
        fi
        if [[ -n ${_ENV_ARGS[CLUSTER_IP]} ]]; then
            cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 1012 '\nERROR: you have indicated that you want to create a linked cluster\n(e.g. TYPE="%1$s"), but you have also specified a cluster-wide\nmulticast address (e.g. CLUSTER_IP="%2$s"). These settings\nare not compatible with each other. A cluster-wide multicast\naddress is only valid in a stretched or standard (no sites)\ncluster.\n' \
                "${_ENV_ARGS[TYPE]}" "${_ENV_ARGS[CLUSTER_IP]}" 1>&2
            rc=$RC_INCORRECT_INPUT
        fi
    fi

    #=============================================================
    : Create the cluster if no input errors have been detected.
    : NOTE: this only creates the cluster in the local DCD. The
    :       synchronization process will call CAA create cluster
    :       and populate the ACD on the other nodes.
    : NOTE: we use clmodclstr, below, because it can not only
    :       create the cluster, like claddclstr, but it can
    :       then also add the nodes. One execution that way.
    #=============================================================
    if (( $rc == RC_UNKNOWN )); then
        print -- "$0()[$LINENO]($SECONDS): clmodclstr -n $name -K -o -p \"$nodes\"" >>$CLMGR_TMPLOG  # Always log commands
        clmodclstr -n $name -K -o -p "$nodes"
        rc=$?
        print "$0()[$LINENO]($SECONDS): clmodclstr RC: $rc" >>$CLMGR_TMPLOG  # Always log command result
        if (( $rc == RC_SUCCESS )); then
            #======================================================
            : Reset the local node label now that the new cluster
            : has been created, and discovery has been completed.
            #======================================================
            export LOCAL_NODE=$(CL=$LINENO get_local_node_label)

            # Change CRIT_DAEMON_RESTART_GRACE_PERIOD to 60 seconds by default
            print "HACMPcluster: crit_daemon_restart_grace_period=60" | odmchange -o HACMPcluster
            rc=$?
        fi
        if (( $rc != RC_SUCCESS )); then
            rc=$RC_ERROR
            print "
ERROR: failed to create the cluster according to the provided
specifications. Running the delete code to attempt to ensure that no
partial instance of the cluster remains..." 1>&2
            CL=$LINENO diagnose "$nodes"

            : Clean up any partially created/configured cluster
            CL=$LINENO KLIB_HACMP_delete_cluster >/dev/null 2>&1
        fi
    fi

    if (( $rc == RC_SUCCESS )); then
        #=================================================================
        : If any cluster parameters were specified that were not already
        : handled by the add cluster code, then call the modify cluster
        : code to process them. This avoids having duplicate code in
        : this script *and* that script.
        #=================================================================

        #
        : Erase values that have already been processed,
        : so that will not get passed to the modify code.
        #
        typeset ATTR=""
        for ATTR in NAME NODES; do
            _ENV_ARGS[$ATTR]=""
        done

        #
        : Check for any needed modifications
        #
        integer CLUSTER_NEEDS_MODIFICATION=0
        for ATTR in FC_SYNC_INTERVAL HEARTBEAT_TYPE REPOSITORIES CLUSTER_IP \
                    RG_SETTLING_TIME RG_DIST_POLICY MAX_EVENT_TIME \
                    MAX_RG_PROCESSING_TIME SITE_POLICY_FAILURE_ACTION \
                    SITE_POLICY_NOTIFY_METHOD \
                    DAILY_VERIFICATION VERIFICATION_NODE \
                    VERIFICATION_HOUR VERIFICATION_DEBUGGING \
                    HEARTBEAT_FREQUENCY GRACE_PERIOD \
                    SITE_HEARTBEAT_CYCLE SITE_GRACE_PERIOD TYPE \
                    SPLIT_POLICY MERGE_POLICY NFS_QUORUM_SERVER \
                    LOCAL_QUORUM_DIRECTORY REMOTE_QUORUM_DIRECTORY \
                    QUARANTINE_POLICY CRITICAL_RG ACTION_PLAN TIEBREAKER \
                    NOTIFY_METHOD NOTIFY_INTERVAL MAXIMUM_NOTIFICATIONS \
                    DEFAULT_SURVIVING_SITE APPLY_TO_PPRC_TAKEOVER \
                    TEMP_HOSTNAME \
                    LPM_POLICY HEARTBEAT_FREQUENCY_DURING_LPM \
                    NETWORK_FAILURE_DETECTION_TIME CAA_AUTO_START_DR \
                    CAA_REPOS_MODE \
                    CAA_CONFIG_TIMEOUT LVM_PREFERRED_READ \
                    CRIT_DAEMON_RESTART_GRACE_PERIOD \
                    SKIP_EVENT_PROCESSING_MANAGE_MODE
        do
            if [[ -n ${_ENV_ARGS[$ATTR]} ]]
            then
                CLUSTER_NEEDS_MODIFICATION=1
                break
            fi
        done

        #
        : If needed, perform the modifications
        #
        if (( CLUSTER_NEEDS_MODIFICATION ))
        then
            # NOTE: the positional order below is the contract with
            # KLIB_HACMP_modify_cluster; keep the two in sync (see the
            # note at the top of this file).
            CL=$LINENO KLIB_HACMP_modify_cluster properties \
                "${_ENV_ARGS[NAME]}" \
                "${_ENV_ARGS[FC_SYNC_INTERVAL]}" \
                "${_ENV_ARGS[RG_SETTLING_TIME]}" \
                "${_ENV_ARGS[RG_DIST_POLICY]}" \
                "${_ENV_ARGS[MAX_EVENT_TIME]}" \
                "${_ENV_ARGS[MAX_RG_PROCESSING_TIME]}" \
                "${_ENV_ARGS[SITE_POLICY_FAILURE_ACTION]}" \
                "${_ENV_ARGS[SITE_POLICY_NOTIFY_METHOD]}" \
                "${_ENV_ARGS[DAILY_VERIFICATION]}" \
                "${_ENV_ARGS[VERIFICATION_NODE]}" \
                "${_ENV_ARGS[VERIFICATION_HOUR]}" \
                "${_ENV_ARGS[VERIFICATION_DEBUGGING]}" \
                "${_ENV_ARGS[REPOSITORIES]}" \
                "${_ENV_ARGS[CLUSTER_IP]}" \
                "${_ENV_ARGS[HEARTBEAT_FREQUENCY]}" \
                "${_ENV_ARGS[GRACE_PERIOD]}" \
                "${_ENV_ARGS[SITE_HEARTBEAT_CYCLE]}" \
                "${_ENV_ARGS[SITE_GRACE_PERIOD]}" \
                "${_ENV_ARGS[TYPE]}" \
                "${_ENV_ARGS[HEARTBEAT_TYPE]}" \
                "${_ENV_ARGS[TEMP_HOSTNAME]}" \
                "${_ENV_ARGS[SPLIT_POLICY]}" \
                "${_ENV_ARGS[MERGE_POLICY]}" \
                "${_ENV_ARGS[NFS_QUORUM_SERVER]}" \
                "${_ENV_ARGS[LOCAL_QUORUM_DIRECTORY]}" \
                "${_ENV_ARGS[REMOTE_QUORUM_DIRECTORY]}" \
                "${_ENV_ARGS[QUARANTINE_POLICY]}" \
                "${_ENV_ARGS[CRITICAL_RG]}" \
                "${_ENV_ARGS[ACTION_PLAN]}" \
                "${_ENV_ARGS[TIEBREAKER]}" \
                "${_ENV_ARGS[NOTIFY_METHOD]}" \
                "${_ENV_ARGS[NOTIFY_INTERVAL]}" \
                "${_ENV_ARGS[MAXIMUM_NOTIFICATIONS]}" \
                "${_ENV_ARGS[DEFAULT_SURVIVING_SITE]}" \
                "${_ENV_ARGS[APPLY_TO_PPRC_TAKEOVER]}" \
                "${_ENV_ARGS[NODES]}" \
                "${_ENV_ARGS[LPM_POLICY]}" \
                "${_ENV_ARGS[HEARTBEAT_FREQUENCY_DURING_LPM]}" \
                "${_ENV_ARGS[NETWORK_FAILURE_DETECTION_TIME]}" \
                "${_ENV_ARGS[CAA_AUTO_START_DR]}" \
                "${_ENV_ARGS[CAA_REPOS_MODE]}" \
                "${_ENV_ARGS[CAA_CONFIG_TIMEOUT]}" \
                "${_ENV_ARGS[LVM_PREFERRED_READ]}" \
                "${_ENV_ARGS[CRIT_DAEMON_RESTART_GRACE_PERIOD]}" \
                "${_ENV_ARGS[SKIP_EVENT_PROCESSING_MANAGE_MODE]}"
            if (( $? != RC_SUCCESS )); then
                rc=$RC_ERROR
                cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 299 '\nERROR: failed to create the cluster according to the provided specifications. Running the delete code to attempt to ensure that no partial instance of the cluster remains...\n\n' 1>&2
                CL=$LINENO KLIB_HACMP_delete_cluster 1>&2
            else
                /usr/es/sbin/cluster/utilities/cltopinfo
            fi
        fi

        #===========================================================
        : If output from this operation was requested, retrieve it
        #===========================================================
        if (( $rc == RC_SUCCESS )); then
            cl_dspmsg -s $CLMGR_SET $CLMGR_MSGS 289 '\n*** The initial cluster configuration information has been saved. You can now define repository disks, along with other configuration information. When the cluster configuration is fully defined, verify and synchronize the cluster to deploy the configuration to all defined nodes.\n\n'
            if (( CLMGR_VERBOSE )) || [[ -n $CLMGR_ATTRS ]]; then
                CL=$LINENO CLMGR_VERBOSE=0 KLIB_HACMP_get_cluster_attributes properties 2>/dev/null
            fi
        fi
    fi

    log_return_msg "$rc" "$0()" "$LINENO"
    return $?
}  # End of "KLIB_HACMP_add_cluster()"