#!/bin/ksh93 # ALTRAN_PROLOG_BEGIN_TAG # This is an automatically generated prolog. # # Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2021. All rights reserved. # # ALTRAN_PROLOG_END_TAG # # IBM_PROLOG_BEGIN_TAG # This is an automatically generated prolog. # # 61haes_r721 src/43haes/usr/sbin/cluster/utilities/smcaactrl.sh 1.50 # # Licensed Materials - Property of IBM # # COPYRIGHT International Business Machines Corp. 2011,2016 # All Rights Reserved # # US Government Users Restricted Rights - Use, duplication or # disclosure restricted by GSA ADP Schedule Contract with IBM Corp. # # IBM_PROLOG_END_TAG # @(#) 2650eea 43haes/usr/sbin/cluster/utilities/smcaactrl.sh, 726, 2147A_aha726, Apr 14 2021 09:06 PM ################################################################################ # # Name: smcaactrl # # # Function: This routine is a callback script, invoked by CAA. Its purpose # is to notify IBM PowerHA SystemMirror of actions either taken by, # or attempted by, CAA. # # For some CAA actions, this enables SystemMirror to veto/deny # operations, as needed. For example, if the operations are relevant # to SystemMirror and SystemMirror is active. Or if if the operations # have not been initiated as part of normal SystemMirror processing. # In this context, this callback script prevents customers from # running CAA commands that will affect SystemMirror outside of # normal SystemMirror processing (i.e. using SystemMirror interfaces). # For example, once the SystemMirror and CAA clusters have been # created, we do not want a customer running 'chcluster' or # 'rmcluster' on their own. This helps prevent customers from making # mistakes and accidentally damaging their cluster. # # However, operations by various PowerHA routines must be allowed # to proceed. The way that is done is that a PowerHA routine that # is going to invoke a CAA operation creates a file whose name # contains the routine and its PID. 
#               If that routine is also
#               present in the ps output, the CAA operation is considered
#               legitimate and approved. E.g., if there is a file
#               /usr/es/sbin/cluster/etc/cldare.<pid>,
#               and a ps entry with cldare and that PID, the operation is
#               allowed.
#
#               This routine can also be used by CAA to notify PowerHA SystemMirror
#               of changes in the CAA cluster that require actions at the PowerHA
#               level. For example, if it becomes necessary for CAA to perform an
#               Automatic Repository Replacement (ARR) to automatically failover to
#               a backup repository disk, CAA will invoke this callback script to
#               inform SystemMirror of the change.
#
# Notes:        Naturally, this routine has rather ad-hoc coding, specific to and
#               dependent on the various PowerHA SystemMirror routines that
#               legitimately call CAA functions.
#
#               It would require only modest cleverness to bypass the security
#               check function above: just enter the desired CAA command while
#               some PowerHA SystemMirror operation is in progress, since this
#               code cannot really tell where the CAA command originated.
#
# Input:        As passed by CAA. See getopts and "operands as interpreted"
#               message below.
#
# Output:       return code = 0 - Operation is approved
#               return code = 1 - Operation is not approved
#               logging to clutils.log
#
#################################################################################

#==================================================
# This simple function is invoked on script exit,
# and its sole purpose is to log the result of
# this smcaactrl invocation.
#
# Arguments:  $1 - exit code to log and return
#             0   = approved, 207 = partially approved (migration),
#             anything else = denied
# Side effects: appends this run's log to clutils.log, removes the
#             per-invocation log file, and exits the script.
#==================================================
function on_exit
{
    typeset -i rc=$1

    if (( $rc == 0 )); then
        print "smcaactrl:0:[$LINENO]($SECONDS):${TAB}This CAA operation, \"$SUBCOMMAND\", was approved.\n" >>$OP_LOG
    elif (( $rc == 207 )); then
        #
        : Lets every body know migration is in progress
        #
        msg="This CAA operation, \"$SUBCOMMAND\", was ***partially*** approved as migration is in progress."
        print -- "smcaactrl:${rc}:[$LINENO]($SECONDS):${TAB}${msg}" >>$OP_LOG
        errlogger "$msg"
        logger "$msg"
    else
        #
        : Give everybody the bad news
        #
        msg="This CAA operation, \"$SUBCOMMAND\", was ***not*** approved."
        print -- "smcaactrl:${rc}:[$LINENO]($SECONDS):${TAB}${msg}" >>$OP_LOG
        errlogger "$msg"
        logger "$msg"
    fi

    #=======================================================================
    # Append all logging to the "clutils.log" file in one contiguous block
    #=======================================================================
    typeset CLUTIL_LOG=$(clodmget -q "name = clutils.log" -n -f value HACMPlogs)
    [[ -z $CLUTIL_LOG ]] && CLUTIL_LOG="/var/hacmp/log"
    CLUTIL_LOG="$CLUTIL_LOG/clutils.log"
    cat $OP_LOG >>$CLUTIL_LOG
    rm -f $OP_LOG

    exit $rc
}   # End of "on_exit()"

#===================================
# Declare and initialize variables
#===================================
typeset PID="" SKIP="" REM=""
typeset HAETC="/usr/es/sbin/cluster/etc"
typeset PHASE=""
typeset CLUSTER_UUID=""
typeset NODE_UUID=""
typeset TARGET_UUID=""
typeset TARGET_SHORT_ID=""
typeset TARGET_PRIORITY=""
typeset SITE_UUID=""
typeset SITE_NAME=""
typeset SITE_SHORT_ID=""
typeset SITE_PRIORITY=""
typeset TARGET_NAME=""
typeset O_TARGET_UUID=""
typeset IS_INITIATOR=""
typeset IS_RUNNING_LOCAL=""
typeset -i TRANS_ID=0
typeset OLD_HOSTNAME=""
typeset NEW_HOSTNAME=""
typeset PERSIST_HOSTNAME=""
typeset INET_SHORT=""
typeset OLD=""
typeset NEW=""
typeset V_VALUES=""
typeset S_VALUES=""
typeset LOCAL_NODE=""
typeset CLUSTER_ID=""
integer node_id=0

#================================================
# Used for Automatic Repository Replacement (ARR)
#================================================
typeset -i CLUSTER_TYPE=-1
typeset SITE_ID=""
typeset ACTIVE_REPOSITORY_CHANGE=""
typeset BACKUP_REPOSITORIES_CHANGE=""
# previous active repository
typeset PREVIOUS_ACTIVE_REPOSITORY_PVID=""
# new active repository
typeset NEW_ACTIVE_REPOSITORY_DISKNAME=""
typeset NEW_ACTIVE_REPOSITORY_PVID=""
# previous backup
repositories list typeset PREVIOUS_BACKUP_REPOSITORIES_PVID="" # new backup repositories list typeset NEW_BACKUP_REPOSITORIES_PVID="" # Exported so "on_exit()" can access them export TAB=$'\t' export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)" export SUBCOMMAND="" #============================================================= : Establish comprehensive logging for this important command #============================================================= typeset PS4="smcaactrl:\$?:[\$LINENO](\$SECONDS):${TAB} " LOG_PATH=$(clodmget -q "name = clutils.log" -f value -n HACMPlogs) LOG_PATH=${LOG_PATH:-/var/hacmp/log} export OP_LOG="${LOG_PATH}/smcaactrl.log.$$" # Must be visible within "on_exit()" touch $OP_LOG exec 1>>$OP_LOG exec 2>>$OP_LOG print "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Running smcaactrl at $(/usr/bin/date) with the following parameters:\n\ $*" #================================== : Establish tracing, if requested #================================== LOCAL_NODE=$(get_local_nodename 2>/dev/null) if [[ -z $VERBOSE_LOGGING && -n $LOCAL_NODE ]] then VERBOSE_LOGGING=$(clodmget -q "object = VERBOSE_LOGGING and name = $LOCAL_NODE" -f value -n HACMPnode) fi if [[ $VERBOSE_LOGGING == "high" ]] then set -x version='1.50' fi #================================================= : See if there is a PowerHA SystemMirror cluster #================================================= CLUSTER_ID=$(ODMDIR=/etc/objrepos clodmget -n -f id HACMPcluster) if [[ $CLUSTER_ID != +([[:digit:]]) ]] then # : We do not have a PowerHA SystemMirror cluster defined yet, : so do not object to this CAA operation # print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always approve operations when there is no PowerHA cluster present" on_exit 0 fi typeset Interpreted_operands="" #=================================== : Parse any command-line arguments # # Interpretation of operands based on # information in _config_args in # caa_config.c #=================================== while getopts 
"img:O:P:T:c:o:t:v:s:" opt do case $opt in i) IS_INITIATOR=y Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"IS_INITIATOR=y" ;; m) IS_RUNNING_LOCAL=y Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"IS_RUNNING_LOCAL=y" ;; O) SUBCOMMAND=$OPTARG Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"subcommand=$SUBCOMMAND" ;; P) PHASE=$OPTARG Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"phase=$PHASE" ;; T) TRANS_ID=$OPTARG Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"trans_ID=$TRANS_ID" ;; c) CLUSTER_UUID=$OPTARG Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"cluster_UUID=$CLUSTER_UUID" ;; t) print -- $OPTARG | IFS=, read TARGET_UUID TARGET_NAME TARGET_SHORT_ID TARGET_PRIORITY [[ -z $TARGET_NAME ]] && DISPLAY_TARGET_UUID="$TARGET_UUID ($(/usr/lib/cluster/clras dumprepos | grep -w $TARGET_UUID | cut -f1 -d' '))" Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"display_target_UUID=$DISPLAY_TARGET_UUID" Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"target_UUID=$TARGET_UUID" [[ -n $TARGET_NAME ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"target_name=$TARGET_NAME" [[ -n $TARGET_SHORT_ID ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"target_short_id=$TARGET_SHORT_ID" [[ -n $TARGET_PRIORITY ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"target_priority=$TARGET_PRIORITY" ;; o) O_TARGET_UUID=$OPTARG if [[ $O_TARGET_UUID == *,* ]] then Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"capabilities=$O_TARGET_UUID" else Display_O_TARGET_UUID=$O_TARGET_UUID if [[ $O_TARGET_UUID != "NO_OTHER_TARGETS" ]] then DISPLAY_O_TARGET_UUID="$O_TARGET_UUID ($(/usr/lib/cluster/clras dumprepos | grep -w $O_TARGET_UUID | cut -f1 -d' '))" fi 
Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"display_other_target_UUID=$DISPLAY_O_TARGET_UUID" Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"other_target_UUID=$O_TARGET_UUID" fi ;; s) print -- $OPTARG | IFS=, read SITE_UUID SITE_NAME SITE_SHORT_ID SITE_PRIORITY [[ -z $SITE_NAME ]] && DISPLAY_SITE_UUID="$SITE_UUID ($(/usr/lib/cluster/clras dumprepos | grep -w $SITE_UUID | tail -1 | cut -f1 -d' '))" Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"display_site_UUID=$DISPLAY_SITE_UUID" Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"site_UUID=$SITE_UUID" [[ -n $SITE_NAME ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"site_name=$SITE_NAME" [[ -n $SITE_SHORT_ID ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"site_short_id=$SITE_SHORT_ID" [[ -n $SITE_PRIORITY ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"site_priority=$SITE_PRIORITY" ;; v) V_VALUES=$OPTARG print -- $OPTARG | IFS=, read MODIFIED_TUNEABLE NEW_VALUE OLD_VALUE Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"tuneable=$MODIFIED_TUNEABLE" [[ -n $NEW_VALUE ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"new_tuneable_value=$NEW_VALUE" [[ -n $OLD_VALUE ]] && Interpreted_operands=${Interpreted_operands:+"${Interpreted_operands}\n\t"}"old_tuneable_value=$OLD_VALUE" ;; *) # : Unexpected operand ignored # print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Unexpected operand \"-${opt} $OPTARG\" was ignored" ;; esac done print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}operands as interpreted:\n\ \t${Interpreted_operands}" #======================================================================== : Special handling for operations associated with host name change # In the CHECK phase, reject any hostname change via hostname command # (i.e. 
where the CuAt is not updated) # In the POST phase update the HACMPnode COMMPATH with the new hostname #======================================================================== if [[ $SUBCOMMAND == 'MOD_NODE' ]] then if [[ $PHASE == "CHECK" ]] then # : CHECK phase processing # if [[ $MODIFIED_TUNEABLE == "hostname" && \ $IS_INITIATOR == 'y' ]] then if [[ -n $LOCAL_NODE ]] then # : If the host name is changed for the local node, and : the node ID is already set up, this change can proceed # node_id=$(clodmget -q "object = COMMUNICATION_PATH and name = $LOCAL_NODE" -f node_id -n HACMPnode) if [[ -z $node_id ]] || (( $node_id == 0 )) then # : Do not allow hostname change operation during HA migration : because config changes are not supported during migration # /usr/es/sbin/cluster/utilities/cl_migcheck "ANY" MIGCHECK_RC=$? if [[ $MIGCHECK_RC != "0" ]] then print -- "smcaactrl:207:[$LINENO]($SECONDS):${TAB}PowerHA migration detected, reject hostname change\n" on_exit 207 # 207 = SCRIPT_SKIP_COOFG # -> CAA will start, but skip MOD_NODE operation for the hostname change else print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}No PowerHA migration detected, proceed with checks ...\n" fi NEW_HOSTNAME=$NEW_VALUE OLD_HOSTNAME=$OLD_VALUE # : This is a hostname change, from $OLD_HOSTNAME to $NEW_HOSTNAME # PERSIST_HOSTNAME=$(LC_ALL=C lsattr -E -l inet0 -a hostname|cut -d ' ' -f2) INET_SHORT=$(echo $PERSIST_HOSTNAME | cut -d\. 
-f1) print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Old Hostname is $OLD_HOSTNAME\n" print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}New Hostname is $NEW_HOSTNAME\n" print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}CuAt inet0 Hostname is $PERSIST_HOSTNAME\n" DHN_IGNORE=$(clodmget -f honorTempHostNameChanges -n HACMPcluster) print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}honorTempHostNameChange is $DHN_IGNORE" SHORT_LONG_CHG=1 # : Is the old hostname a short name and the new one a FQDN or vice versa # if ( [[ -n $(echo $NEW_HOSTNAME | grep "\.") ]] && [[ -z $(echo $OLD_HOSTNAME | grep "\.") ]] ) || \ ( [[ -n $(echo $OLD_HOSTNAME | grep "\.") ]] && [[ -z $(echo $NEW_HOSTNAME | grep "\.") ]] ) then # : Pull the first token off the old and new names # OLD=$(echo $OLD_HOSTNAME | cut -d\. -f1) NEW=$(echo $NEW_HOSTNAME | cut -d\. -f1) # : if they are equal this is a hostname change from long to short or vice versa # if ( [[ $OLD == $NEW ]] || [[ $NEW == $INET_SHORT ]] ) then SHORT_LONG_CHG=0 fi fi if (( $SHORT_LONG_CHG == 1 )) && [[ $PERSIST_HOSTNAME != $NEW_HOSTNAME ]] && (( $DHN_IGNORE == 0 )) then # # The purpose of this logic is to deal with situations where the user # sets the host name via the "hostname" command to something that will # persists only for the duration of this IPL. This would be the case # where the application start script sets the hostname to the same as # the DNS name for the service IP address - which is a common practice. # # In such a case, we do not wish to change the CAA node name, nor force # a cluster wide synchronization. Failing the change here stops the # CAA node name from changing, but of course has no effect on the value # the customer has already set via the hostname command. However, CAA # will retry every ten minutes, generating lots of log activity, if # nothing else. 
# # This can be safely done because such a temporary hostname change does # not affect the IP address on any interface; cluster communications # stil work with the persistent hostname (the inet0 hostname attribute). # # Note that if the "honorTempHostNameChange" attribute in HACMPcluster # is set to 1, the user change of the hostname is allowed to change the # CAA node name. That is, "honor" means "treat temporary as permanent". # # Since this would then require that a verify & sync be done after every # such hostname change, this is not normaly something one would want, # and, indeed, if you are reading this to figure out if you want to # make such a change, the answer is "No!" # print -- "smcaactrl:1:[$LINENO]($SECONDS):${TAB}Hostname change from $OLD_HOSTNAME to $NEW_HOSTNAME is not persistent, so do not allow CAA to change its node name\n" on_exit 1 fi fi fi fi elif [[ $PHASE == 'POST' ]] then # : POST phase processing # # : Allow for unsettled CAA interface : and for the fact that not every MOD_NODE is a host name change # if [[ -n $V_VALUES ]] then if [[ $MODIFIED_TUNEABLE == "hostname" ]] then NEW_HOSTNAME=$NEW_VALUE OLD_HOSTNAME=$OLD_VALUE else print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow MOD_NODE operations that are not a hostname change" on_exit 0 # Not a host name change command fi else # # Code in this block should not be needed for CAA past the 1318 build # and can be removed at some later time # OLD_HOSTNAME=$O_TARGET_UUID NEW_HOSTNAME=$TARGET_UUID if [[ -z $OLD_HOSTNAME || -z $(odmget -q "value = $OLD_HOSTNAME and object = COMMUNICATION_PATH" HACMPnode) ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow MOD_NODE operations that are not a hostname change" on_exit 0 # Not a host name change command fi fi # : CAA informs us that host name $OLD_HOSTNAME has been change to $NEW_HOSTNAME # print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}CAA posts notification that a host name change from $OLD_HOSTNAME to $NEW_HOSTNAME is 
complete\n" if [[ $IS_INITIATOR == 'y' ]] then if [[ -n $LOCAL_NODE ]] then node_id=$(clodmget -q "object = COMMUNICATION_PATH and name = $LOCAL_NODE" -f node_id -n HACMPnode) if [[ -z $node_id ]] || (( $node_id == 0 )) then # : If the host name was changed on this node, and we do not yet : have a valid node ID to tie PowerHA node names to CAA host : names, update the COMMUNICATION_PATH. # # If a non-zero node ID is in place, it is the same ID for the # node at the PowerHA, RSCT, and CAA levels. This allows the # routines that need to find the CAA host name to look it up # by ID when needed, and not save a changable host name. # OLD_NODE=$(odmget -q "object = COMMUNICATION_PATH and name = $LOCAL_NODE" HACMPnode) if [[ -n $OLD_NODE ]] then print -- "$OLD_NODE" | \ sed 's/value = ".*"/value = "'$NEW_HOSTNAME'"/' | \ odmchange -q "object = COMMUNICATION_PATH and name = $LOCAL_NODE" -o HACMPnode # : Now, force a requirement for synchronization # print "HACMPcluster: handle=0" | odmchange -o HACMPcluster # : Log for tracking, to console, syslog and error log # print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Communication path for $LOCAL_NODE updated to $NEW_HOSTNAME, and HACMPcluster handle reset\n" typeset user_msg=$(dspmsg -s 27 scripts.cat 23 "The host name has been changed to ${NEW_HOSTNAME}. The PowerHA SystemMirror configuration should be synchronized (smitty cm_ver_and_sync) at the earliest opportunity\n" $NEW_HOSTNAME | tee /dev/console) logger "$user_msg" errlogger "$user_msg" else print "smcaactrl:0:[$LINENO]($SECONDS):${TAB}get_local_nodename was not successful; communications path was not updated\n" fi fi fi fi # Update hostname changes to GUI to make sure GUI server has the latest agent hostnames in their database # As agents are depends on the hostnames to identify the agents. 
typeset hostname=$(/usr/bin/hostname) if [[ "$hostname" == "${NEW_HOSTNAME}" ]] then typeset event_time_stamp=$(/usr/bin/date +"%a %b %d %H:%M:%S") LOCALNODENAME=$(ODMDIR=$DCD get_local_nodename) if [[ -z $LOCALNODENAME ]] then print "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Not able to get the local node name, can't send the hostname change event to UI \n" else #=========================== : Update the clevents file #=========================== typeset event_seq_no=1 if [[ -f $CLEVENT_LOGDIR/clevents ]]; then event_seq_no=$(/usr/bin/tail -n 1 $CLEVENT_LOGDIR/clevents 2>/dev/null) if [[ -n $event_seq_no && $event_seq_no == +([[:digit:]])\|* ]]; then event_seq_no=${event_seq_no%%\|*} event_seq_no=${event_seq_no##+(0)} (( event_seq_no = event_seq_no + 1 )) fi fi [[ $event_seq_no != +([[:digit:]]) ]] && event_seq_no=1 typeset HOSTNAME_RESULT="FAILED" typeset NOTIFIER="" if [[ -s /usr/es/sbin/cluster/ui/agent/lib/notify-event.ksh && \ -x /usr/es/sbin/cluster/ui/agent/lib/notify-event.ksh ]] then NOTIFIER="/usr/es/sbin/cluster/ui/agent/lib/notify-event.ksh" elif [[ -s /usr/es/sbin/cluster/ui/agent/lib/notify-event && \ -x /usr/es/sbin/cluster/ui/agent/lib/notify-event ]] then NOTIFIER="/usr/es/sbin/cluster/ui/agent/lib/notify-event" fi typeset HOSTNAME_STRING="${OLD_HOSTNAME}:${NEW_HOSTNAME}" if [[ -n $NOTIFIER ]]; then #=============================== : Notify GUI for hostname change #=============================== $NOTIFIER -i "$event_seq_no" \ -c "NODE" \ -o "$LOCALNODENAME" \ -a "HOSTNAME_CHANGE" \ -v "$HOSTNAME_RESULT" \ -t "$event_time_stamp" \ -d "$HOSTNAME_STRING" fi fi fi on_exit 0 fi fi if [[ $SUBCOMMAND == 'REPLACE_REPOS' ]] then #=============================================== : Special handling for REPLACE_REPOS operation #=============================================== if [[ $PHASE == "CHECK" ]] then #======================================= : Check for "clmgr replace repository" #======================================= if [[ $(/usr/bin/ps -e -o pid -o args 
|\ /usr/bin/egrep 'clvt_kshHandler replace repository|clvt_kshHandler modify cluster' |\ /usr/bin/egrep -i -w 'repository|repositories' |\ /usr/bin/grep -v grep) != *([[:space:]]) ]] then print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}A \"clmgr replace repository\" operation is in progress. Allowing it." else # : We are in the case of a Automatic Repository Replacement from CAA # print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}Automatic Repository Replacement (ARR) from CAA [PHASE=\"$PHASE\", SUBCOMMAND=\"$SUBCOMMAND\"]\n." fi on_exit 0 elif [[ $PHASE == 'POST' ]] then #============================================================================== # If this was initiated by a clmgr command, then no problem. # It was not initiated by a clmgr command, then it is an Automatic repository # Replacement (ARR) from CAA, and we have to modify the PowerHA SystemMirror # ODM accordingly. #============================================================================== #======================================= : Check for "clmgr replace repository" #======================================= if [[ $(/usr/bin/ps -e -o pid -o args |\ /usr/bin/egrep 'clvt_kshHandler replace repository|clvt_kshHandler modify cluster' |\ /usr/bin/egrep -i -w 'repository|repositories' |\ /usr/bin/grep -v grep) != *([[:space:]]) ]] then print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}A \"clmgr replace repository\" operation is in progress. Allowing it." on_exit 0 else # : We are in the case of a Automatic Repository Replacement from CAA # 1. The new active repository must be removed from the list # of backup repositories, if it were in this list. # 2. The previous active repository must be added to the list # of backup repositories. # # : Get parameters # NEW_ACTIVE_REPOSITORY_DISKNAME=$TARGET_NAME NEW_ACTIVE_REPOSITORY_PVID="" typeset -i rc=0 # # TARGET_NAME should contain the new repository diskname. 
# This is not the case if the disk belongs to another site, # and this disk is not visible from the current node. This # is the case is this disk is known from the site the # current node belongs too. # if [[ -z $TARGET_NAME ]]; then # # TARGET_NAME is not set. # For this case, we have at least the UUID of the disk, # and we can interrogate one of the nodes on which this # UUID is defined to get its PVID # typeset NODES_USING_THIS_DISK="" NODE_USING_THIS_DISK="" typeset CL_RSH_OUTPUT="" # Loop around nodes, first one which enables to find PVID breaks print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}cmd : lscluster -d | grep -wp \"uUid : $TARGET_UUID\" | grep -w \"Node\" | grep -vw \"Node UUID\"" NODES_USING_THIS_DISK=$(lscluster -d | grep -wp "uUid : $TARGET_UUID" | grep -w "Node" | grep -vw "Node UUID") rc=$? print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}cmd : lscluster -d | grep -wp \"uUid : $TARGET_UUID\" | grep -w \"Node\" | grep -vw \"Node UUID\" RC: $rc" if (( rc == 0 )); then NODES_USING_THIS_DISK=$(print -- "$NODES_USING_THIS_DISK" | sed 's/Node //') if [[ -n $NODES_USING_THIS_DISK ]]; then for NODE_USING_THIS_DISK in $NODES_USING_THIS_DISK; do print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}cmd : cl_rsh -n $NODE_USING_THIS_DISK \"lspv -u | grep -w $TARGET_UUID\"" CL_RSH_OUTPUT=$(cl_rsh -n $NODE_USING_THIS_DISK "lspv -u | grep -w $TARGET_UUID") rc=$? 
print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}cmd : cl_rsh -n $NODE_USING_THIS_DISK \"lspv -u | grep -w $TARGET_UUID\" RC: $rc" if (( rc == 0 )); then if [[ -n $CL_RSH_OUTPUT ]]; then print -- "CL_RSH_OUTPUT=$CL_RSH_OUTPUT" print -- "$CL_RSH_OUTPUT" | read SKIP NEW_ACTIVE_REPOSITORY_PVID REM if [[ -n $NEW_ACTIVE_REPOSITORY_PVID ]]; then print -- "NEW_ACTIVE_REPOSITORY_PVID=$NEW_ACTIVE_REPOSITORY_PVID" break fi else print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}WARNING: cannot get PVID from UUID=$TARGET_UUID from NODE=$NODE_USING_THIS_DISK" fi else print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}WARNING: cannot get PVID from UUID=$TARGET_UUID from NODE=$NODE_USING_THIS_DISK" fi done else print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}ERROR: cannot get any node using this disk UUID=$TARGET_UUID" fi else print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}ERROR: cannot get any node using this disk UUID=$TARGET_UUID" fi else # # TARGET_NAME is set. # For this case, we can get the PVID from the diskname # through lspv on the current node. # typeset LSPV_OUTPUT="" print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}cmd : lspv | grep -w \"$NEW_ACTIVE_REPOSITORY_DISKNAME\"" LSPV_OUTPUT=$(lspv | grep -w "$NEW_ACTIVE_REPOSITORY_DISKNAME") rc=$? 
print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}cmd : lspv | grep -w \"$NEW_ACTIVE_REPOSITORY_DISKNAME\" RC: $rc" if (( rc == 0 )); then print -- "$LSPV_OUTPUT" | read SKIP NEW_ACTIVE_REPOSITORY_PVID REM if [[ -n $NEW_ACTIVE_REPOSITORY_PVID ]]; then print -- "NEW_ACTIVE_REPOSITORY_PVID=$NEW_ACTIVE_REPOSITORY_PVID" fi else print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}ERROR: cannot get PVID from DISKNAME=$NEW_ACTIVE_REPOSITORY_DISKNAME from NODE=$NODE_USING_THIS_DISK" fi fi # # At this step, the NEW_ACTIVE_REPOSITORY_PVID should be set # typeset ARR_MSG="" if [[ -n $NEW_ACTIVE_REPOSITORY_PVID ]]; then print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}Automatic Repository Replacement (ARR) from CAA [PHASE=\"$PHASE\", SUBCOMMAND=\"$SUBCOMMAND\"] : NEW_ACTIVE_REPOSITORY_DISKNAME=\"$NEW_ACTIVE_REPOSITORY_DISKNAME\" NEW_ACTIVE_REPOSITORY_PVID=\"$NEW_ACTIVE_REPOSITORY_PVID\"" # Get the type of cluster CLUSTER_TYPE=$(clodmget -f multi_site_lc -n HACMPcluster) if (( $CLUSTER_TYPE == 1 )) then # : Cluster type = LC # SITE_ID=$(lscluster -m | grep -w "$SITE_UUID" | awk '{print $2}' | head -1) else # : Cluster type = SC or NSC # SITE_ID=0 fi # : Collect data to prepare change in SystemMirror ODM # PREVIOUS_ACTIVE_REPOSITORY_PVID=$(clodmget -n -q "id=$SITE_ID" -f repository HACMPsircol) PREVIOUS_BACKUP_REPOSITORIES_PVID=$(clodmget -n -q "id=$SITE_ID" -f backup_repository HACMPsircol) NEW_BACKUP_REPOSITORIES_PVID=$(print $PREVIOUS_BACKUP_REPOSITORIES_PVID | sed "s/$NEW_ACTIVE_REPOSITORY_PVID//g") NEW_BACKUP_REPOSITORIES_PVID=${NEW_BACKUP_REPOSITORIES_PVID:+$NEW_BACKUP_REPOSITORIES_PVID }$PREVIOUS_ACTIVE_REPOSITORY_PVID print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}Automatic Repository Replacement (ARR) from CAA [PHASE=\"$PHASE\", SUBCOMMAND=\"$SUBCOMMAND\"] : CLUSTER_TYPE=\"$CLUSTER_TYPE\", SITE_UUID=\"$SITE_UUID\" SITE_ID=\"$SITE_ID\" PREVIOUS_ACTIVE_REPOSITORY_PVID=\"$PREVIOUS_ACTIVE_REPOSITORY_PVID\" PREVIOUS_BACKUP_REPOSITORIES_PVID=\"$PREVIOUS_BACKUP_REPOSITORIES_PVID\" 
NEW_BACKUP_REPOSITORIES_PVID=\"$NEW_BACKUP_REPOSITORIES_PVID\"." # : Perform change in SystemMirror ODM # ODM_CLASS="HACMPsircol:" ACTIVE_REPOSITORY_CHANGE="repository=$NEW_ACTIVE_REPOSITORY_PVID" echo $ODM_CLASS $ACTIVE_REPOSITORY_CHANGE | odmchange -o HACMPsircol -q "id=$SITE_ID" BACKUP_REPOSITORIES_CHANGE="backup_repository=$NEW_BACKUP_REPOSITORIES_PVID" echo $ODM_CLASS $BACKUP_REPOSITORIES_CHANGE | odmchange -o HACMPsircol -q "id=$SITE_ID" # : Log for tracking, to console, syslog and error log # ARR_MSG="INFO: \ Propagated the Automatic Repository Replacement (ARR) change from CAA to PowerHA SystemMirror.\n\ 1. Backup repositories list was :\n ${PREVIOUS_BACKUP_REPOSITORIES_PVID}.\n\ 2. Set new active repository to : ${NEW_ACTIVE_REPOSITORY_PVID}, and removed it from backup repositories list.\n\ 3. Added the previous active repository : ${PREVIOUS_ACTIVE_REPOSITORY_PVID} to the backup repositories list.\n\ 4. Backup repositories list is now :\n ${NEW_BACKUP_REPOSITORIES_PVID}.\n" print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}$arr_msg" logger -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}$arr_msg" errlogger -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}$arr_msg" # : Update HACMPcluster : Now, force a requirement for synchronization # ODM_CLASS="HACMPcluster:" HANDLE_CHANGE="handle=0" echo $ODM_CLASS $HANDLE_CHANGE | odmchange -o HACMPcluster on_exit 0 else ARR_MSG="ERROR: \ Cannot propagate the Automatic Repository Replacement (ARR) change from CAA to PowerHA SystemMirror.\n" print -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}$arr_msg" logger -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}$arr_msg" errlogger -- "smcaactrl:2:[$LINENO]($SECONDS):${TAB}$arr_msg" on_exit 1 fi fi fi ## end of PHASE == 'POST' fi ## end of SUBCOMMAND == 'REPLACE_REPOS' # : 'Always approve' notifications are handled here # if [[ $PHASE != 'CHECK' ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always approve notifications that are not checks: PHASE == \"$PHASE\"" on_exit 0 fi if [[ 
$IS_INITIATOR != 'y' ]] then #============================== : We are not the initiator #============================= if [[ $O_TARGET_UUID == "ALL_TARGETS" ]] && [[ $SUBCOMMAND == @(RM|STOP)_@(NODE|SITE) ]] ; then #============================================================== :If target is ALL_NODES and the operation is disruptive, :and there is a cluster manager running on the local node, :do not approve it, even if the initiator is not the local node. #=============================================================== if [[ -n $IS_RUNNING_LOCAL ]]; then #========================================= : Since this operation is for the local : node, we can run the check right here. #========================================= if ! clcheck_server cthags then print -- "smcaactrl:1:[$LINENO]($SECONDS):${TAB}Do not approve ,if target is ALL_NODES and the operation is disruptive and there is a cluster manager running on the local node" on_exit 1 fi fi else print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always approve notifications that are not for this node: IS_INITIATOR == \"$IS_INITIATOR\"" on_exit 0 fi fi if [[ $SUBCOMMAND == 'JOIN_NODE' || \ $SUBCOMMAND == 'ADD_DISK' || \ $SUBCOMMAND == 'RM_DISK' || \ $SUBCOMMAND == 'MOD_NODE' ]] then # : Hostname changes that we would have needed to react to were handled above # print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always approve notifications for join node, add/remove disk, and change host name: SUBCOMMAND == \"$SUBCOMMAND\"" on_exit 0 fi #=================== : Check for cldare #=================== /usr/bin/ps -e -o pid -o args |\ /usr/bin/grep -w 'cldare' |\ /usr/bin/grep -v grep |\ while read PID REM do # : cldare will background a process if cluster services are running : So check for all the PIDs # if [[ -e ${HAETC}/cldare.$PID ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow operations done by cldare" on_exit 0 fi done #======================= : Check for clmigcheck #======================= /usr/bin/ps 
-e -o pid -o args |\ /usr/bin/grep -w clmigcheck |\ /usr/bin/grep -v grep |\ read PID REM if [[ $PID == +([[:digit:]]) && \ -e ${HAETC}/clmigcheck.$PID ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow operations done by clmigcheck" on_exit 0 fi #==================== : Check for clmkcaa #==================== /usr/bin/ps -e -o pid -o args |\ /usr/bin/grep -w clmkcaa |\ /usr/bin/grep -v grep |\ read PID REM if [[ $PID == +([[:digit:]]) && \ -e ${HAETC}/clmkcaa.$PID ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow operations done by clmkcaa" on_exit 0 fi #======================== : Check for cl_cfg_sm_rt #======================== /usr/bin/ps -e -o pid -o args |\ /usr/bin/grep -w cl_cfg_sm_rt |\ /usr/bin/grep -v grep |\ read PID REM if [[ $PID == +([[:digit:]]) && \ -e ${HAETC}/cl_cfg_sm_rt.$PID ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow operations done by cl_cfg_sm_rt" on_exit 0 fi if [[ $SUBCOMMAND == @(RM|STOP|ADD)_@(NODE|SITE) ]] then #==================================== : Check for a running clmgr process #==================================== /usr/bin/ps -e -o pid -o args |\ /usr/bin/egrep -w 'resource_delete|resource_add' |\ /usr/bin/grep -v grep |\ read PID REM if [[ $PID == +([[:digit:]]) && \ -e ${HAETC}/clmgrrmclstr.$PID ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow \"clmgr delete cluster\" operations" on_exit 0 fi fi #============================= : Check for "clmgr add node" #============================= if [[ $SUBCOMMAND == *NODE* && \ $(/usr/bin/ps -e -o pid -o args |\ /usr/bin/grep -w 'resource_add node' |\ /usr/bin/grep -v grep) != *([[:space:]]) ]] then print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow \"clmgr add node\" operations" on_exit 0 fi #============================================= : Check for "clmgr modify cluster NODES=..." 
#=============================================
# A "clmgr modify cluster NODES=..." operation legitimately changes the
# CAA node membership, so any *NODE* subcommand is approved while such a
# resource_modify process is visible in ps.
if [[ $SUBCOMMAND == *NODE* && \
      $(/usr/bin/ps -e -o pid -o args |\
        /usr/bin/grep -w 'resource_modify cluster' |\
        /usr/bin/grep -i 'NODES=' |\
        /usr/bin/grep -v grep) != *([[:space:]]) ]]
then
    print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow \"clmgr modify cluster\" to modify the cluster node list"
    on_exit 0
fi

#========================================
: Check for "clmgr verify/sync cluster"
#========================================
# Approve *NODE* subcommands while a "clvt ... verify cluster" or
# "clvt ... sync cluster" process is running.
if [[ $SUBCOMMAND == *NODE* && \
      $(/usr/bin/ps -e -o pid -o args |\
        /usr/bin/grep 'clvt' |\
        /usr/bin/grep -e 'verify cluster' -e 'sync cluster' |\
        /usr/bin/grep -v grep) != *([[:space:]]) ]]
then
    print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Always allow \"clmgr verify/sync cluster\" operations"
    on_exit 0
fi

#=======================================================
: Check for emgr or clmgr process, which can STOP_NODE
#=======================================================
if [[ $SUBCOMMAND == @(RM|STOP|START)_@(NODE|SITE) ]]
then
    #===================================================
    : Check for a running emgr/clmgr process. Get PID.
    #==================================================
    # In ksh93 the last pipeline stage runs in the current shell, so PID
    # and REM remain set after the pipeline completes.
    /usr/bin/ps -e -o pid -o args |\
        /usr/bin/egrep -we emgr -we "resource_online" -we "resource_offline" |\
        /usr/bin/grep -v grep |\
        read PID REM

    integer ALLOW=0 # forbidden unless approved

    # For STOP/START, pre-approve when cluster services are inactive on
    # the target node.
    # NOTE(review): the log text implies clcheck_server returns success
    # when cthags is NOT active -- confirm against clcheck_server itself.
    if [[ $SUBCOMMAND == @(STOP|START)_@(NODE|SITE) ]]
    then
        if [[ -n $IS_RUNNING_LOCAL ]]; then
            if clcheck_server cthags
            then
                print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow a $SUBCOMMAND operation for the local node because cthags is not active"
                ALLOW=1
            fi
        elif [[ -n $TARGET_NAME ]]; then
            # Remote target with a known name: run the check there via clrsh
            if /usr/sbin/clrsh $TARGET_NAME /usr/es/sbin/cluster/utilities/clcheck_server cthags
            then
                print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow a $SUBCOMMAND operation for node $TARGET_NAME because cthags is not active there"
                ALLOW=1
            fi
        elif [[ -n $TARGET_UUID ]]; then
            # Only a UUID is available: derive the node name from the
            # "lscluster -m" paragraph (grep -p) that mentions this UUID,
            # then strip the "Node name:" label and leading whitespace.
            typeset DATA=$(LC_ALL=C /usr/sbin/lscluster -m | grep -p $TARGET_UUID)
            TARGET_NAME=$(print -- "$DATA" | grep "Node name:")
            TARGET_NAME=${TARGET_NAME##*:}
            TARGET_NAME=${TARGET_NAME##+([[:space:]])}
            if [[ -n $TARGET_NAME ]]; then
                if /usr/sbin/clrsh $TARGET_NAME /usr/es/sbin/cluster/utilities/clcheck_server cthags
                then
                    print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow a $SUBCOMMAND operation for node $TARGET_NAME because cthags is not active there"
                    ALLOW=1
                fi
            fi
        fi
    fi

    # Proceed if a legitimate emgr/clmgr PID file matches the process
    # found above, or if the operation was already pre-approved.
    if ( [[ $PID == +([[:digit:]]) ]] && \
         [[ -e $HAETC/emgr.$PID || -e $HAETC/clmgr.$PID ]] ) || \
       (( ALLOW ))
    then
        #=====================================================
        : We cannot allow anybody to STOP/RM/NODE/SITE while
        : PowerHA SystemMirror cluster services are running.
        : Take care to check this status on the correct node.
        #=====================================================
        ALLOW=0 # forbidden unless approved
        if [[ $SUBCOMMAND == @(RM|STOP)* ]]; then
            if [[ -n $IS_RUNNING_LOCAL ]]; then
                #=========================================
                : Since this operation is for the local
                : node, we can run the check right here.
                #=========================================
                if clcheck_server cthags
                then
                    print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow a $SUBCOMMAND operation for the local node because cthags is not active"
                    ALLOW=1
                fi
            elif [[ -s $HAETC/clmgr.$PID ]]
            then
                #==========================================================
                : Since this operation is targeted at a remote node, the
                : cluster services check needs to be performed on that
                : node via clrsh. That requires converting the target
                : node UUID into a proper hostname for clrsh. Since this
                : is a clmgr operation, we can try checking the "license"
                : file for the needed information.
                #==========================================================
                typeset HA_NAME="" UUID="" REST=""
                grep -w "$TARGET_UUID" $HAETC/clmgr.$PID | read HA_NAME TARGET_NAME UUID REST
                if [[ -n $TARGET_NAME ]]; then
                    if /usr/sbin/clrsh $TARGET_NAME /usr/es/sbin/cluster/utilities/clcheck_server cthags
                    then
                        print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow a $SUBCOMMAND operation for node $TARGET_NAME because cthags is not active there"
                        ALLOW=1
                    fi
                fi
            elif [[ -n $TARGET_NAME ]]
            then
                if /usr/sbin/clrsh $TARGET_NAME /usr/es/sbin/cluster/utilities/clcheck_server cthags
                then
                    print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow a $SUBCOMMAND operation for node $TARGET_NAME because cthags is not active there"
                    ALLOW=1
                fi
            fi
        else
            # Only START_* reaches this branch: starting CAA is harmless.
            print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}It should always be OK to allow a $SUBCOMMAND operation to start CAA"
            ALLOW=1
        fi
        if (( ALLOW )); then
            on_exit 0
        else
            #=============================
            : Give the user the bad news
            #=============================
            print -- "smcaactrl:1:[$LINENO]($SECONDS):${TAB}Operation $SUBCOMMAND is rejected because PowerHA SystemMirror cluster services are running. Cannot change CAA state now.\n"
            if [[ -e $HAETC/emgr.$PID ]]; then
                # dspmsg pulls the translated text from scripts.cat (set 27,
                # msg 24), falling back to the inline default; tee echoes it
                # to the system console as well.
                typeset user_msg=$(dspmsg -s 27 scripts.cat 24 "An e-fix cannot be applied to CAA at this time because PowerHA SystemMirror is active.
PowerHA SystemMirror cluster services should be stopped (\"smitty clstop\") on this node, and the emgr command retried.\n" | tee /dev/console)
                logger "$user_msg"
                errlogger "$user_msg"
            fi
            on_exit 1
        fi
    fi
fi

#=============================================================
: Check for CAA which can locally STOP/START NODE directly
: during AIX Live Update. Check that clstrmgr is not running
#=============================================================
if [[ -n $IS_RUNNING_LOCAL && \
      $SUBCOMMAND == @(STOP|START)_NODE ]]
then
    #
    : Check if clstrmgr is not running
    #
    # Note: Stopping PowerHA SystemMirror cluster services
    # leaves the daemon running
    #
    if ! ps -e | grep -qw clstrmgr
    then
        print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow a $SUBCOMMAND on the local node, if the PowerHA SystemMirror cluster manager is not running"
        on_exit 0
    fi
fi

#===============================
: Check for MOD_TUNE operation
#===============================
if [[ $SUBCOMMAND == "MOD_TUNE" && -n $V_VALUES ]]
then
    # These tunables are owned by SystemMirror cluster configuration
    # processing and must never be modified directly through CAA.
    if [[ $MODIFIED_TUNEABLE == @(communication_mode|link_timeout|node_down_delay|remote_hb_factor)* ]]
    then
        print -- "smcaactrl:1:[$LINENO]($SECONDS):${TAB}The following tunable cannot be set directly: $MODIFIED_TUNEABLE. This tunable may only be set as part of a cluster configuration operation."
        on_exit 1
    elif [[ $MODIFIED_TUNEABLE == "node_timeout" ]]
    then
        #====================================
        : Check for a running cl_dr process
        #====================================
        # node_timeout may only change while an LPM helper (cl_dr or
        # cl_2dr) is running with a matching cl_dr.<pid> file.
        set -A CL_DR_PROCS "cl_dr" "cl_2dr"
        for CL_DR_PROC in ${CL_DR_PROCS[@]}; do
            /usr/bin/ps -e -o pid -o args | /usr/bin/grep -w $CL_DR_PROC | /usr/bin/grep -v grep | read PID REM
            # cl_2dr creates pidfile by name of cl_dr.$$ as well
            if [[ $PID == +([[:digit:]]) && -e ${HAETC}/cl_dr.$PID ]]
            then
                print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allowing a modification of node_timeout because an LPM operation is in progress."
                on_exit 0
            fi
        done
        # No LPM helper found: reject the direct node_timeout change.
        print -- "smcaactrl:1:[$LINENO]($SECONDS):${TAB}The node_timeout cannot be set directly outside of an LPM operation."
        on_exit 1
    elif [[ $MODIFIED_TUNEABLE == @(local_merge_policy|network_fdt|local_network_fdt|dr_enabled) ]]
    then
        # These tunables are safe to change directly.
        print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB}Allow changes to \"$MODIFIED_TUNEABLE\"."
        on_exit 0
    fi
    #
    : Anything not allowed for - $V_VALUES - gets rejected below
    #
fi

#=============================================================================
: The enhanced cluster new CAA capabilities features are added in CAA_LVL_5
: These new style of events can only be handled with this or above
: PowerHA SystemMirror level, the older PowerHA SystemMirror running
: with new CAA still needs old style of events.
: Hence the commit level phase PRE check is added in CAA to approve
: or reject based on PowerHA SystemMirror level.
#=============================================================================
if [[ $SUBCOMMAND == "COMMIT_LVL" && $PHASE == "CHECK" ]]
then
    if [[ $MODIFIED_TUNEABLE == "SPLT_MRG" || $MODIFIED_TUNEABLE == "CAA_DR" || $MODIFIED_TUNEABLE == "COMDISK" || $MODIFIED_TUNEABLE == "4KDISK" ]]
    then
        print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB} Always approve $MODIFIED_TUNEABLE CAP from CAA [PHASE=\"$PHASE\", SUBCOMMAND=\"$SUBCOMMAND\"].\n"
        on_exit 0
    fi
fi

#=============================================================================
: In PowerHA DR enabled solution, when complete primary site goes down,
: replicated DR site should come online. For this CAA has to rebuild on DR site
: During the rebuild process CAA sends a request to approve CAA_DR operation.
#=============================================================================
if [[ $SUBCOMMAND == "CAA_DR" && $PHASE == "CHECK" ]]
then
    print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB} Always approve CAA_DR operation from CAA [PHASE=\"$PHASE\", SUBCOMMAND=\"$SUBCOMMAND\"].\n"
    on_exit 0
fi

#=====================================================================
: This check is to allow adding or deleting a new site.
: This is required for cluster type conversion.
: Example - standard cluster to a linked cluster
#=====================================================================
if [[ $SUBCOMMAND == @(ADD|RM)_@(NODE|SITE) && $PHASE == "CHECK" ]]
then
    print -- "smcaactrl:0:[$LINENO]($SECONDS):${TAB} Always approve SITE operations from CAA [PHASE=\"$PHASE\", SUBCOMMAND=\"$SUBCOMMAND\"].\n"
    on_exit 0
fi

#=============================================================================
: We fell thru the cracks to a condition we are not aware of, so we cannot
: allow the CAA operation to continue. To enable debugging of the cause of
: this condition, some useful information is logged to "clutils.log", below.
#=============================================================================
# Dump the process table and any PID files in $HAETC, each line prefixed
# with this script's log tag, then reject the operation.
/usr/bin/ps -e -o pid -o args | sed "s/^/smcaactrl:1:[$LINENO]($SECONDS):${TAB}/g"
ls -alt ${HAETC}/*.[[:digit:]]* 2>/dev/null | sed "s/^/smcaactrl:1:[$LINENO]($SECONDS):${TAB}/g"
print -- "smcaactrl:1:[$LINENO]($SECONDS):${TAB}Unanticipated operation $SUBCOMMAND not permitted"
on_exit 1

#==============================================================================
# The following comment block attempts to enforce coding standards when this
# file is edited via emacs or vim. This block _must_ appear at the very end
# of the file, or the editor will not find it, and it will be ignored.
#============================================================================== # Local Variables: # indent-tabs-mode: nil # tab-width: 4 # End: #============================================================================== # vim: tabstop=4 shiftwidth=4 expandtab #==============================================================================