#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG                                                    
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2020,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                      
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/events/reconfig_resource_complete.sh 1.86.1.2 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1996,2013 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/events/reconfig_resource_complete.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

#########################################################################
#                                                                       #
#       Name:           reconfig_resource_complete                      #
#                                                                       #
#       Description:    This script is called when a reconfig           #
#                       resource event is completed.                    #
#                                                                       #
#       Arguments:      none                                            #
#                                                                       #
#       Returns:        0       success                                 #
#                       1       failure                                 #
#                       2       bad argument                            #
#                                                                       #
#########################################################################

#########################################################################
#
#       Name:           isRGOwner
#
#       Description:    Determines the state (primary, secondary, etc)
#                       of the group of interest on the node of interest
#
#       Arguments:      rg - resource group of interest
#                       node - node of interest
#
#       Returns:        none - echo's the state to stdout
#
#########################################################################
function isRGOwner {
    typeset PS4_FUNC=$0
    # ${VERBOSE_LOGGING:-} : tolerate an unset variable - main runs "set -u"
    [[ ${VERBOSE_LOGGING:-} == "high" ]] && set -x
    typeset NODENAME=$2
    typeset SEARCH_RG=$1
    typeset rg
    typeset state
    typeset node
    typeset cstate
    typeset startup_pref
    typeset fallover_pref
    typeset fallback_pref

    #
    :	We should be using AUXILLIARY_ACTIONS / PRIMARY_ACTIONS to determine whether we are primary / secondary
    #
    if [[ -s ${TEMPPATH_RRI}clRGinfo.out ]]
    then
	#
	:   Read the colon separated clRGinfo snapshot directly - no "cat"
	:   pipeline needed.  The loop runs in the current shell, so the
	:   "return" statements below leave the function immediately.
	#
	while IFS=: read rg state node cstate startup_pref fallover_pref fallback_pref
	do
	    # Quote both sides of the node comparison: node names are
	    # literal strings, never glob patterns
	    if [[ "$SEARCH_RG" == "$rg" && "$node" == "$NODENAME" ]]
	    then
		if [[ $state == "ONLINE SECONDARY" ]]
		then
		    echo "secondary"
		    return
		fi
		if [[ $state == "ONLINE" ]]
		then
		    echo "primary"
		    return
		fi
	    fi
	done < ${TEMPPATH_RRI}clRGinfo.out
    fi

    #
    :	Always echo a state, per the contract above: either the group/node
    :	pair was not found, it is in some other state, or no snapshot exists
    #
    echo "unknown"
}


#########################################################################
#
#       Name:           rri_get_secondary_sustained
#
#       Description:    Lists any resource groups in secondary sustained
#
#       Arguments:      None
#
#       Returns:        none - echo's the group name(s) to stdout
#
#########################################################################
function rri_get_secondary_sustained {
    typeset PS4_FUNC=$0
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    #
    :	Walk RESOURCE_GROUPS and AUXILLIARY_ACTIONS in lock step - the Nth
    :	action code applies to the Nth group.  Echo every group whose
    :	auxilliary action is "S" - sustained in the secondary role.
    #
    typeset remaining_actions=$AUXILLIARY_ACTIONS

    for group in $RESOURCE_GROUPS
    do
	# Peel the leading action code off the list (ksh93 runs the last
	# element of a pipeline in the current shell, so "read" sticks)
	echo $remaining_actions | read action remaining_actions
	if [[ $action == "S" ]]
	then
	    echo $group
	fi
    done
}

#########################################################################
#
#       Name:           rri_acquire_variables
#
#       Description:    Helper function used by rri_acquire_secondary to
#                       setup the RRI acquire variables for the instances
#                       of the specified resource group
#
#       Arguments:      type of online (primary or secondary)
#                       resource group of interest
#
#       Returns:        1 if there are any actions (events) required for
#                       this group
#
#########################################################################
function rri_acquire_variables {
    typeset PS4_FUNC=$0
    # ${VERBOSE_LOGGING:-} : tolerate an unset variable - main runs "set -u"
    [[ ${VERBOSE_LOGGING:-} == "high" ]] && set -x

    #
    :   Generate the RRI acquire variables for primary / secondary for the specified RG
    #
    typeset RGTYPE=$1       # primary or secondary depending on when we're called
    typeset RGNAME=$2

    # Only the first node of the (space separated) third argument matters
    typeset NODE
    echo $3 | read NODE othernodes
    typeset OWNER_TYPE=$(isRGOwner $RGNAME $NODE)
    # NOTE: inverted convention - RRI_RUN=1 (shell "failure") means the
    # caller has replicated-resource work to do for this group
    typeset RRI_RUN=0

    for res in $RRI_RESOURCES; do
	ACQUIRE_FILENAME=$TEMPPATH_RRI$RGNAME.ACQUIRE_RESOURCES.$res

	if [[ -f $ACQUIRE_FILENAME ]]
	then
	    # Strip the double quotes surrounding each value in the
	    # acquire file (read it directly - no "cat" pipeline needed)
	    RESOURCES_TO_ACQUIRE=$(sed -e 's/"//g' $ACQUIRE_FILENAME)
	    if [[ -n $RESOURCES_TO_ACQUIRE ]]
	    then
		#
		:	If we have been asked to only report RGs for primary instances, then only report
		:	primary instances
		#
		if [[ $OWNER_TYPE == $RGTYPE ]]
		then
		    export $res="$RESOURCES_TO_ACQUIRE"
		    #
		    : If this is a GMVG we need to specify the same list of resources for VOLUME_GROUP
		    #
		    if [[ $res == "GMVG_REP_RESOURCE" ]]
		    then
			if [[ -z $VOLUME_GROUP ]]
			then
			    export VOLUME_GROUP="$RESOURCES_TO_ACQUIRE"
			else
			    export VOLUME_GROUP="$RESOURCES_TO_ACQUIRE $VOLUME_GROUP"
			fi
			export FILESYSTEM="ALL"
		    fi
		    RRI_RUN=1
		fi
	    fi
	fi
    done
    return $RRI_RUN
}

#########################################################################
#
#       Name:           rri_acquire_secondary
#
#       Description:    Prepares the environment for a call to node_up_local
#                       in order to acquire secondary instances of a
#                       resource group
#
#       Arguments:      resource group which will have secondary instances
#                       brought online
#
#       Returns:        exit status from node_up_local
#
#########################################################################
function rri_acquire_secondary {
    typeset PS4_FUNC=$0
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    typeset STATUS=0

    #
    :   Resource group whose secondary instance may need to come online
    #
    typeset rg=$1

    #
    :   Empty out every resource variable first, so that only the values
    :   populated by rri_acquire_variables below are visible downstream
    #
    for res in $RESOURCES $RRI_RESOURCES
    do
	export $res=
    done

    #
    :   rri_acquire_variables returns 1 - hence the "!" - when this group
    :   has replicated resource work pending here
    #
    if ! rri_acquire_variables "secondary" $rg $LOCALNODENAME
    then
	export PRINCIPAL_ACTION="NONE"
	export ASSOCIATE_ACTION="SUSTAIN"
	export AUXILLIARY_ACTION="ACQUIRE_SECONDARY"
	export VG_RR_ACTION="ACQUIRE"
	export FOLLOWER_ACTION="ACQUIRE_SECONDARY"
	export GROUPNAME=$rg

	if ! node_up_local_complete
	then
	    #
	    :	Log the failure; the caller decides how to proceed
	    #
	    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $rg. Manual intervention required." $PROGNAME $rg
	    STATUS=1
	fi
    fi

    return $STATUS
}

###############################################################################
#
#  Name:  setup_persistent_labels
#
#  This function calls cl_configure_persistent label for local node in the
#  cluster that has any persistent labels defined.
#
#  Arguments:    none
#  Returns:      nothing
#
###############################################################################

function setup_persistent_labels
{
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    #
    :   Configure the persistent labels on the local node, but only when
    :   the ODM actually defines one for this node
    #
    typeset persistent_def
    persistent_def=$(clodmget -q "function = persistent AND nodename = $LOCALNODENAME" -f nodename -n HACMPadapter)

    if [[ -n $persistent_def ]]
    then
        cl_configure_persistent_address config_all -P
    fi
}


#
:   Main starts here
#
export PROGNAME=${0##*/}
export EVENT_TYPE=$PROGNAME       # Tell other scripts who called them

export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
# ODM object repositories: SCD holds the staged (DARE'd in) configuration,
# ACD the currently active one.
# NOTE(review): DCD is assigned but never referenced below - confirm it is
# needed at all.
DCD="/etc/es/objrepos"
SCD="/usr/es/sbin/cluster/etc/objrepos/stage"
ACD="/usr/es/sbin/cluster/etc/objrepos/active"

#
# this is the working directory for DARE - the location must
# match the declaration in the other scripts involved
#
TEMPPATH="/var/hacmp/log/HACMP_RESOURCES/"
export TEMPPATH_RRI=/var/hacmp/log/HACMP_REP_RESOURCES/

# Pull this node's run time parameters (VERBOSE_LOGGING among them) from the
# active configuration; "set -a" auto-exports every variable the eval assigns
set -a
eval $(ODMDIR=$ACD cllsparam -n $LOCALNODENAME)
set +a

[[ $VERBOSE_LOGGING == "high" ]] && {
    set -x
    version='1.86.1.2'
}

#
# these lists must match those in the other event scripts
#
RRI_RESOURCES="PPRC_REP_RESOURCE ERCMF_REP_RESOURCE SVCPPRC_REP_RESOURCE GMVG_REP_RESOURCE \
SR_REP_RESOURCE TC_REP_RESOURCE GENXD_REP_RESOURCE"

RESOURCES="DISK VOLUME_GROUP CONCURRENT_VOLUME_GROUP FILESYSTEM FSCHECK_TOOL \
RECOVERY_METHOD EXPORT_FILESYSTEM APPLICATIONS MOUNT_FILESYSTEM SERVICE_LABEL \
INACTIVE_TAKEOVER SSA_DISK_FENCING TAKEOVER_LABEL NFS_HOST \
AIX_CONNECTIONS_SERVICES COMMUNICATION_LINKS AIX_FAST_CONNECT_SERVICES \
SHARED_TAPE_RESOURCES FORCED_VARYON \
SIBLING_NODES FOLLOWER_ACTION PPRC_REP_RESOURCE GMD_REP_RESOURCE \
SR_REP_RESOURCE TC_REP_RESOURCE GENXD_REP_RESOURCE \
ERCMF_REP_RESOURCE SVCPPRC_REP_RESOURCE VG_AUTO_IMPORT FS_BEFORE_IPADDR \
EXPORT_FILESYSTEM_V4 STABLE_STORAGE_PATH WPAR_NAME"

# Append any user-defined resource types configured in this cluster
UDRESTYPE_LIST=$(cludrestype -l -h | awk ' /USERDEFINED/ { printf("%s ",$1); }' )

RESOURCES="$RESOURCES $UDRESTYPE_LIST"
NFS_RESOURCES="MOUNT_FILESYSTEM NFS_HOST"

#
:   This will be the exit status seen by the Cluster Manager.
:   If STATUS is not 0, event_error will run.
:   All lower-level scripts should pass status back to the caller.
:   This will allow a Resource Groups to be processed individaully,
:   independent of the status of another resource group.
#
STATUS=0
RUN_SCRIPT="false"

# From here on, referencing an unset variable is an error
set -u

# This event takes no arguments
if (( $# != 0 ))
then
    cl_log 1035 "Usage: $PROGNAME\n" $PROGNAME
    exit 2
fi
# Dump the environment as a serviceability aid (captured in the event output)
env

#
:   At this point, if rg dependencies are configured, the SCD is in the place
:   of the ACD, so that we could run the rg_move events.
:   lets swap them back:
#
if [[ $RG_DEPENDENCIES == "TRUE" && $ACQUIRE_COMPLETE_PHASE_RESOURCES == "FALSE" ]]
then
    # Move the staged configuration back to its staging location ...
    if ! mv $ACD $SCD
    then
	CMD="mv $ACD $SCD"
	cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
	exit 1
    fi

    # ... and restore the previously active configuration saved under the
    # DARE working directory
    if ! mv $TEMPPATH/active $ACD
    then
	CMD="mv $TEMPPATH/active $ACD"
	cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
	exit 1
    fi
fi

#
:   Ensure that the ACD directory exists
#
if [[ ! -d $ACD ]]
then
    cl_log 1042  "$ACD does not exist\n" $ACD
    exit 1
fi

#
:   Ensure that the SCD directory exists
#
if [[ ! -d $SCD ]]
then
    cl_log 1042 "$SCD does not exist\n " $SCD
    exit 1
fi

#
:   Create the temporary directory
#
if [[ ! -d "$TEMPPATH" ]]
then
    if ! mkdir $TEMPPATH
    then
	cl_log 1043 "Unable to make $TEMPPATH directory\n" $TEMPPATH
        exit 1
    fi
fi

# All ODM lookups below run against the staged configuration
export ODMDIR=$SCD

#
#   If there are no rg dependencies or we are in the complete (second) phase
#   of the event there is additional work to be done for certain resource types
#
if [[ $RG_DEPENDENCIES == "FALSE" || $ACQUIRE_COMPLETE_PHASE_RESOURCES == "TRUE" ]]
then
    #
    :	If this is a two node cluster and exported filesystems exist, then when the
    :	cluster topology is stable notify rpc.statd of the changes
    #
    if (( 2 == $(clodmget -q "object = COMMUNICATION_PATH" -f name -n HACMPnode | wc -l) ))
    then
	RESOURCE_GROUPS=$(clodmget -f group -n HACMPgroup)
	for group in $RESOURCE_GROUPS
	do
	    EXPORTLIST=$(clodmget -q "group=$group AND name=EXPORT_FILESYSTEM" -f value -n HACMPresource)
	    if [[ -n "$EXPORTLIST" ]]
	    then
		if ! cl_update_statd
		then
		    cl_log 1074 "$PROGNAME: Failure occurred while processing cl_update_statd.\n" $PROGNAME
		    STATUS=1
		fi
	    fi
	    # NOTE(review): this break is unconditional, so only the FIRST
	    # resource group is ever examined for EXPORT_FILESYSTEM -
	    # confirm that is the intent
	    break
	done
    fi

    if [[ -f /tmp/.RPCLOCKDSTOPPED ]]
    then
	#
	:   If RPC lock daemon has been stopped,
	:   wait for it to finish stopping
	:   then restart it
	#
	# Poll for at most 60 seconds for rpc.lockd to leave "stopping" state
	rm -f /tmp/.RPCLOCKDSTOPPED
	for (( COUNT=0 ; COUNT<60 ; COUNT++ ))
	do
	    if LC_ALL=C lssrc -s rpc.lockd | grep stopping
	    then
		sleep 1
	    else
		break
	    fi
	done
	startsrc -s rpc.lockd
    fi

    #
    :	This will return the list of resource groups to be acquired/sustained
    :	for the resource groups that are sustained, the resources that may need
    :	to be acquired are in temporary files
    #
    # clsetenvgrp presumably emits variable assignments (RESOURCE_GROUPS,
    # PRINCIPAL_ACTIONS, ASSOCIATE_ACTIONS, AUXILLIARY_ACTIONS, ...) which
    # "set -a" exports for the loops below.
    # NOTE(review): RC captures the status of the eval, not necessarily of
    # clsetenvgrp itself - if clsetenvgrp fails silently, RC may still be 0.
    set -a
    eval $(clsetenvgrp $LOCALNODENAME "reconfig_resource_complete")
    RC=$?
    set +a
    if (( $RC != 0 ))
    then
	STATUS=1
    fi

    SUSTAINED_GROUPS=""
    ACQUIRED_GROUPS=""
    ASSOCIATE_GROUPS=""

    SECONDARY_SUSTAINED_GROUPS=$(rri_get_secondary_sustained)

    if [[ $PROGNAME == "reconfig_resource_complete_secondary" ]]
    then
	for group in $SECONDARY_SUSTAINED_GROUPS
	do
	    #
	    :	Acquire secondary instances if any for this resource group
	    #
	    if ! rri_acquire_secondary $group
	    then
		STATUS=1
	    fi
	done
    fi #end if secondary

    # Classify each group by its principal action code: "S" = sustained,
    # "A" = acquire, "N" = NFS-only association (inferred from usage below).
    # ksh93 runs the last element of a pipeline in the current shell, so the
    # trailing "read" statements here are visible after the pipeline ends.
    count=0
    for group in $RESOURCE_GROUPS
    do
	count=$(( count + 1 ))
	echo $PRINCIPAL_ACTIONS | cut -f $count -d ' ' | read action
	if [[ $action == "S" ]]
	then
	    SUSTAINED_GROUPS="$SUSTAINED_GROUPS $group"
	fi

	#
	:   We only care about acquisition groups if no rg dependencies 
	:   configured.
	#
        if [[ $RG_DEPENDENCIES == "FALSE" ]]
        then
	    if [[ $action == "A" ]]
	    then
	       ACQUIRED_GROUPS="$ACQUIRED_GROUPS $group"
	    fi
	fi

	# NOTE(review): associate_action read here is unused in this loop and
	# is re-derived per group further below - this read looks redundant
	echo $ASSOCIATE_ACTIONS | cut -f $count -d ' ' | read associate_action

	#
	:   Collect the groups that have only NFS actions
	#
	if [[ $action == "N" ]]; then
	    ASSOCIATE_GROUPS="$ASSOCIATE_GROUPS $group"
	fi

    done

    if [[ $PROGNAME == "reconfig_resource_complete" ]]
    then
	if [[ $RG_DEPENDENCIES == "FALSE" ]]
	then
	    for group in $ACQUIRED_GROUPS
	    do
		#
		:   Set the Resource Environment variables to the list of
		:   resources in this group in the SCD
		#
		export ODMDIR=$SCD

		set -a
		eval $(clsetenvres $group $PROGNAME)
		set +a
		export GROUPNAME=$group

		#
		:   Acquire these resources
		#

		#
		:   Get filesystems
		#
		if ! node_up_remote_complete $LOCALNODENAME
		then
		    #
		    :	If ANY failure has occurred, this script should exit accordingly
		    #
		    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
		    STATUS=1
		fi

		#
		:   Get applications/complete
		#
		if ! node_up_local_complete
		then
		    #
		    :	If ANY failure has occurred, this script should exit accordingly
		    #
		    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
		    STATUS=1
		fi
	    done
	fi

	for group in $ASSOCIATE_GROUPS
	do
	    #
	    :	Set the Resource Environment variables to the list of
	    :	resources in this group in the SCD
	    #
	    export ODMDIR=$SCD

	    set -a
	    eval $(clsetenvres $group $PROGNAME)
	    set +a
	    export GROUPNAME=$group

	    # Re-derive this group's associate action from its position in
	    # the RESOURCE_GROUPS list
	    associate_action=""

	    count=0
	    for setenvgrp_group in $RESOURCE_GROUPS
	    do
		count=$(( count + 1 ))
		if [[ $setenvgrp_group == $group ]]
		then
		    echo $ASSOCIATE_ACTIONS | cut -f $count -d ' ' | read associate_action
		fi
	    done

	    # "MO" appears to mean mount-only; anything else rebuilds the NFS
	    # resource variables from the per-group acquire files
	    if [[ $associate_action != "MO" ]]
	    then
		for res in $NFS_RESOURCES
		do
		    export $res=""
		    VARIABLE=""
                    #
                    :   Check to see if ACQUIRE_RESOURCES file exists
                    #
                    if [[ -f ${TEMPPATH}${group}.ACQUIRE_RESOURCES.${res} ]]
		    then
		        #
		        :   Walk through the ACQUIRE_RESOURCES file, exporting the environment variables found
		        #
		        for variable in $(cat $TEMPPATH$group.ACQUIRE_RESOURCES.$res | cut -d'"' -f2)
		        do
			    if [[ -n $variable ]]
			    then
			        RUN_SCRIPT="true"
			        if [[ -z $VARIABLE ]]
			        then
				    VARIABLE=${variable}
			        else
				    VARIABLE="${VARIABLE} ${variable}"
			        fi
			    fi
		        done
		        export $res="$VARIABLE"
		    fi
		done
	    else
		RUN_SCRIPT="true"
	    fi

	    #
	    :	Acquire these resources
	    #

	    #
	    :	Get NFS filesystems
	    #
	    if [[ $RUN_SCRIPT == "true" ]]
	    then
		if ! node_up_remote_complete $LOCALNODENAME
		then
		    #
		    :	If ANY failure has occurred, this script should exit accordingly
		    #
		    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
		    STATUS=1
		fi
	    fi
	done

	#
        :   And now work with the sustained resource groups
	#
	for group in $SUSTAINED_GROUPS
	do

	    RUN_SCRIPT="false"
	    #
	    :	Set the Resource Environment variables to the list of
	    :	resources in this group in the ACD.  This is  to
	    :	obtain NFSMOUNT_LABEL if it is appropriate
	    #
	    set -a
	    eval $(clsetenvres $group $PROGNAME)
	    set +a
	    export GROUPNAME=$group
	    # 
	    :	PRINCIPAL_ACTION ASSOCIATE_ACTION AUXILLIARY_ACTION VG_RR_ACTION
	    :	are also set via this call
	    #

	    #
	    :	Export the RRI variables
	    #
	    rri_acquire_variables "primary" $group $LOCALNODENAME

	    #
	    :	Force the node_up_local if we have found RRI resources to acquire
	    #
	    # $? here is the return code of rri_acquire_variables above
	    if (( $? == 1 )); then
		RUN_SCRIPT=true
	    fi

	    #
	    :	Clear the current environment variables
	    #
	    for res in $RESOURCES
	    do
		export $res=""
		VARIABLE=""
                #
                :   Check to see if ACQUIRE_RESOURCES file exists
                #
                if [[ -f ${TEMPPATH}${group}.ACQUIRE_RESOURCES.${res} ]]
		then
		    #
		    :   Walk through the ACQUIRE_RESOURCES file, exporting the 
		    :   environment variables found
		    #
		    for variable in $(cat $TEMPPATH$group.ACQUIRE_RESOURCES.$res | cut -d'"' -f2)
		    do
		        if [[ -n $variable ]]
		        then
			    RUN_SCRIPT="true"
			    if [[ -z $VARIABLE ]]
			    then
			        VARIABLE=${variable}
			    else
			        VARIABLE="${VARIABLE} ${variable}"
			    fi
		        fi
		    done
		    export $res="$VARIABLE"
		fi
	    done

	    #
	    :	Acquire these resources
	    #
	    if [[ $RUN_SCRIPT == "true" ]]
	    then
		#
		:   mount filesystems
		#
		if ! node_up_remote_complete $LOCALNODENAME
		then
		    #
		    :	If ANY failure has occurred, this script should exit accordingly
		    #
		    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
		    STATUS=1
		fi
		#
		:   Get applications/complete
		#
		if ! node_up_local_complete
		then
		    #
		    :	If ANY failure has occurred, this script should exit accordingly
		    #
		    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
		    STATUS=1
		fi
	    fi
	done
    fi #endif primary

    #
    :	Work on join_cleanup
    #
    if [[ $RG_DEPENDENCIES == "TRUE" && $ACQUIRE_COMPLETE_PHASE_RESOURCES == "TRUE" ]]
    then
	#
	:   Go through the list of resource groups sustaining as primary,
	:   and call join_cleanup for all of them when working on the 
	:   secondary instances resources
	#
	for group in $SUSTAINED_GROUPS
	do
	    for siblingGroup in $SIBLING_GROUPS
	    do
		if [[ $group == $siblingGroup ]]
		then
		    RUN_SCRIPT="false"
		    #
		    :	Set the Resource Environment variables to the list of
		    :	resources in this group in the ACD.  This is  to
		    :	obtain NFSMOUNT_LABEL if it is appropriate
		    #
		    set -a
		    eval $(clsetenvres $group $PROGNAME)
		    set +a
		    export GROUPNAME=$group

		    #
		    :	PRINCIPAL_ACTION ASSOCIATE_ACTION AUXILLIARY_ACTION VG_RR_ACTION
		    :	are also set via this call
		    #

		    #
		    :	Export the RRI variables
		    #
		    rri_acquire_variables "secondary" $group $SIBLING_NODES

		    # $? is the return code of rri_acquire_variables: 1 means
		    # replicated resources were found for this group
		    if (( $? == 1 ))
		    then
			set -a
			eval $(clsetrepenv $siblingGroup)
			set +a
			export GROUPNAME=$siblingGroup

			if [[ $PROGNAME == "reconfig_resource_complete_secondary" ]]
			then
			    #
			    :	As the secondary comes before the primary, assume no siblings,
			    :	as the primary instance's DARE'd in rep resources have 
			    :	not been acquired
			    #
			    export SIBLING_RELEASING_NODES=""
			    export SIBLING_ACQUIRING_NODES=""
			    export SIBLING_NODES=""
			    export PRINCIPAL_ACTION="SUSTAIN"
			    export AUXILLIARY_ACTION="NONE"

			fi

			if [[ $PROGNAME == "reconfig_resource_complete" ]]
			then
			    #
			    :   No need to do anything 
			    #
			    continue
			fi

			# Run every replicated-resource join_cleanup method
			METHODS=$(cl_rrmethods2call join_cleanup)
			for method in $METHODS
			  do
			  if [[ -x $method ]]
			  then
			      if ! $method $PROGNAME $LOCALNODENAME
			      then
				  STATUS=1
			      fi
			  fi
			done
		    fi
		fi
	    done
	done

	for group in $SECONDARY_SUSTAINED_GROUPS
	do
	    for siblingGroup in $SIBLING_GROUPS
	    do
		if [[ $group == $siblingGroup ]]
		then
		    RUN_SCRIPT="false"
		    #
		    :	Set the Resource Environment variables to the list of
		    :	resources in this group in the ACD.  This is  to
		    :	obtain NFSMOUNT_LABEL if it is appropriate
		    #
		    set -a
		    eval $(clsetenvres $group $PROGNAME)
		    set +a
		    export GROUPNAME=$group

		    #
		    :	PRINCIPAL_ACTION ASSOCIATE_ACTION AUXILLIARY_ACTION VG_RR_ACTION
		    :	are also set via this call
		    #

		    #
		    :	Export the RRI variables
		    #
		    rri_acquire_variables "primary" $group $SIBLING_NODES

		    #
		    #	Force the node_up_local if we have found RRI resources to acquire
		    #
		    if (( $? == 1 ))
		    then
			set -a
			eval $(clsetrepenv $siblingGroup)
			set +a
			export GROUPNAME=$siblingGroup

			if [[ $PROGNAME == "reconfig_resource_complete_secondary" ]]
			then
			    continue
			fi

			if [[ $PROGNAME == "reconfig_resource_complete" ]]
			then
			    #
			    :	When we acquire the primary instances resource, assume at 
			    :	the location of the secondary instance that we are acquiring
			    :	the primary instance
			    #
			    export SIBLING_RELEASING_NODES=""
			    export SIBLING_ACQUIRING_NODES=$SIBLING_NODES
			    export SIBLING_NODES=""
			    export PRINCIPAL_ACTION="NONE"
			    export AUXILLIARY_ACTION="SUSTAIN"
			fi

			METHODS=$(cl_rrmethods2call join_cleanup)
			for method in $METHODS
			do
			    if [[ -x $method ]]
			    then
			        if ! $method $PROGNAME $LOCALNODENAME
			        then
			  	    STATUS=1
			        fi
			    fi
			done
		    fi
		fi
	    done
	done
    fi

    #
    :	If a resource group in this DARE is now homeless,
    :	then we need to put it into an error state
    #
    # HOMELESS_GROUPS presumably comes from the clsetenvgrp eval above
    for GROUP in $HOMELESS_GROUPS
    do
	cl_RMupdate rg_error $GROUP $PROGNAME
    done
fi # end if no rg dependencies or in complete phase

if [[ $PROGNAME == "reconfig_resource_complete" ]]
then
    #
    :	Save the ACD, copy SCD to ACD
    #
    if [[ $RG_DEPENDENCIES == "TRUE" && $ACQUIRE_COMPLETE_PHASE_RESOURCES == "TRUE" ]]
    then
	# With rg dependencies: park the old active configuration under the
	# DARE work area, then promote the staged configuration to active
	if ! mv $ACD $TEMPPATH/
	then
	    CMD="mv $ACD $TEMPPATH/"
	    cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
	    exit 1
	fi

	if ! mv $SCD $ACD
	then
	    CMD="mv $SCD $ACD"
	    cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
	    exit 1
	fi

	#
	: now that the SCD is gone we need to
	#
	export ODMDIR=$ACD

	#
	:   Note: If resource group dependencies configured, SCD is now the ACD, and
	:   the ACD has been saved on TEMPPATH
	:   now we can process the rg_move events.
	#
    else
	#
	:   Rename the SCD to the ACD
	#
	# Without rg dependencies: the old active configuration is simply
	# discarded and replaced by the staged one
	rm -rf $ACD
	if ! mv $SCD $ACD
	then
	    CMD="mv $SCD $ACD"
	    cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
	    exit 1
	fi

	#
	:   Now that the SCD is gone we need to
	#
	export ODMDIR=$ACD

	#
	:   Clean up
	#
	rm -rf $TEMPPATH
	# 
	:   Update the set of errnotify stanzas for Selective Fallover
	#
	clmkerrnotify -u
    fi

    #
    :   Configure any defined persistent labels on local node.
    #
    setup_persistent_labels

fi #endif primary

#
# refresh clcomd so it picks up any changes made during execution
#
refresh -s clcomd

# STATUS is non-zero if any resource group failed; the Cluster Manager will
# run event_error in that case (see the comment block at the top of main)
exit $STATUS
