#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/events/node_down_complete.sh 1.2.13.13 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1990,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/events/node_down_complete.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

# Including file containing SCSIPR functions
. /usr/es/sbin/cluster/events/utils/cl_scsipr_event_functions

#########################################################################
#                                                                       #
#       Name:           node_down_complete                              #
#                                                                       #
#       Description:    This event script is called after the node_down #
#                       script successfully completes.                  #
#                       The script checks the nodename, then calls one  #
#                       of the two sub-event scripts appropriately.     #
#                                                                       # 
#       Called by:      cluster manager                                 #
#                                                                       #
#       Calls to:       node_down_local_complete,                       #
#                       node_down_remote_complete                       #
#                                                                       #
#       Arguments:      nodename [graceful | forced]                    #
#                                                                       #
#       Returns:        0       success                                 #
#                       1       failure                                 #
#                       2       bad argument                            #
#                                                                       #
#########################################################################
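
# Illustrative invocation only - the Cluster Manager normally supplies the
# arguments, and the node name here is a hypothetical placeholder:
#
#     node_down_complete nodeA graceful     # nodeA left the cluster gracefully
#     node_down_complete nodeA forced       # nodeA was forced down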

#########################################################################
#
#   node_down_vg_fence_term
#
#   This will remove CAA volume group fencing for all cluster nodes
#
#   For every volume group managed by PowerHA, set the fence height
#   on all nodes to "read/write".  This allows unrestricted access to
#   the volume group on all nodes - the administrator can vary it on
#   by hand on any node.
#
#########################################################################
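#
#   Illustrative only: the per-node operation distributed by the C-SPOC call
#   below is essentially
#
#       cl_vg_fence_term -c <vg>    # drop fencing for <vg> on that node
#
#   where <vg> stands for each PowerHA-managed volume group.
#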
function node_down_vg_fence_term {

    typeset PS4_FUNC="node_down_vg_fence_term"
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    local_VGs=$(print $(lsvg -L 2> /var/hacmp/log/${PROGNAME}.lsvg.err | egrep -v 'rootvg|caavg_private'))
    [[ -e /var/hacmp/log/${PROGNAME}.lsvg.err && ! -s /var/hacmp/log/${PROGNAME}.lsvg.err ]] && rm /var/hacmp/log/${PROGNAME}.lsvg.err
    #
    :   Release any VG fencing in place on all reachable cluster nodes
    #
    for vg in $(clodmget -q "name like *VOLUME_GROUP" -f value -n HACMPresource)
    do
        typeset PS4_LOOP="$vg"
        #
        :   call cl_vg_fence_term to terminate the vg fence on all active nodes
        #   and ignore the inactive ones
        #
        cl_on_cluster -cspoc '-f' -P "cl_vg_fence_term -c $vg" 2>/dev/null

        #
        :   ignore any failure and continue with the node_down process. RC=$?
        #   C-SPOC provides adequate reporting, and the node down process
        #   is not going to be stopped by a failure
        #
    done

    return 0
}

#########################################################################
#
# Main Starts Here
#
#########################################################################

PROGNAME=${0##*/}
export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"

set -a
eval $(cllsparam -n $LOCALNODENAME)
set +a

if [[ $VERBOSE_LOGGING == "high" ]]
then
    set -x
    version='%I%'
fi

#
:   Pick up input
#
export NODENAME=$1
export PARAM=$2

NODE_HALT_CONTROL_FILE="/usr/es/sbin/cluster/etc/ha_nodehalt.lock"

#
# This will be the exit status seen by the Cluster Manager.
# If STATUS is not 0, the Cluster Manager will enter reconfiguration.
# All lower-level scripts should pass status back to the caller.
# This allows each Resource Group to be processed individually,
# independent of the status of any other resource group.
#
STATUS=0

set -u

if (( $# < 1 ))
then
    echo "Usage: $PROGNAME nodename [graceful | forced]"
    exit 2
fi

#
: serial number for this event is $EVENT_SERIAL_NUMBER
#

if [[ $PARAM == "forced" && $NODENAME == $LOCALNODENAME ]]
then
    #
    : If the local node is being "forced" down, then clear out the resource locator database now.
    #
    if ! clchdaemons -r -d clstrmgr_scripts -t resource_locator ; then
	cl_log 655 "$PROGNAME: Problem with resource location database in HACMPdaemons ODM." $PROGNAME
	STATUS=1
    fi
fi

#
:   if RG_DEPENDENCIES is set to false by the cluster manager,
:   then resource groups will be processed via clsetenvgrp
#
if [[ $PARAM != "forced" && $RG_DEPENDENCIES == "FALSE" ]]
then
    #
    :	Set the RESOURCE_GROUPS environment variable with the names
    :	of all Resource Groups participating in this event, and export
    :	them to all successive scripts
    #
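    #
    # Illustrative only: clsetenvgrp emits shell assignments that are eval'd
    # and exported below, e.g. RESOURCE_GROUPS="rg1 rg2" where the group
    # names are hypothetical placeholders.
    #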
    set -a
    eval $(clsetenvgrp $NODENAME $PROGNAME $PARAM)
    RC=$?
    set +a
    : exit status of clsetenvgrp $NODENAME $PROGNAME $PARAM is: $RC 
    if (( RC != 0 ))
    then
	STATUS=1
    fi

    #
    :   Process_Resources for parallel-processed resource groups
    #
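    #
    # process_resources acts on the RESOURCE_GROUPS and related variables
    # exported by clsetenvgrp above to handle the parallel-processed
    # resource groups.
    #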
    process_resources
    RC=$?
    : exit status of process_resources is: $RC
    if (( $RC != 0 ))
    then
        STATUS=1
    fi
fi

#
:   For each participating resource group, serially process the resources
#
LOCALCOMP="N"

#
:   if RG_DEPENDENCIES is set to false by the cluster manager,
:   then resource groups will be processed via clsetenvgrp
#
if [[ $PARAM != "forced" && $RG_DEPENDENCIES == "FALSE" ]]
then

    for group in $RESOURCE_GROUPS
    do
	#
	: All sub-scripts must inherit the same environment values
	# that were set by this script via "set -a", for example
	# the VERBOSE_LOGGING value.
	#
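	#
	# Illustrative only: clsetenvres emits NAME=value assignments describing
	# the resources of group $group; the eval below places them, exported,
	# into the environment of the sub-event scripts.
	#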
	set -a
	eval $(clsetenvres $group $PROGNAME)
	set +a
	export GROUPNAME=$group
	
	#
	:   Check nodename, then call node_down_local_complete or
	:   node_down_remote_complete accordingly.
	#
	if [[ $NODENAME == "$LOCALNODENAME" ]]
	then
	    LOCALCOMP="Y"
	    clcallev node_down_local_complete
	    RC=$?
	    : exit status of node_down_local_complete is: $RC
	else
	    clcallev node_down_remote_complete $*
	    RC=$?
	    : exit status of node_down_remote_complete is: $RC
	fi

	#
	:   If ANY failure has occurred, this script should exit accordingly
	#
	if (( RC != 0 ))
	then
	    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required.\n" $PROGNAME $group
	    STATUS=1
	fi
    done
fi

if [[ $PARAM != "forced" && $NODENAME == $LOCALNODENAME ]]
then
    #
    :	Call ss-unload replicated resource methods if they are defined
    #
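    #
    # cl_rrmethods2call returns a whitespace-separated list of method paths;
    # each method that exists and is executable is invoked in turn below.
    #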
    METHODS=$(cl_rrmethods2call ss_unload)
    
    for method in $METHODS
    do
	if [[ -x $method ]]
	then
	    if ! $method
	    then
		STATUS=1
	    fi
	fi
    done

    #
    :   If dependencies are configured and the node is being "forced" down, then
    :   there is no need to vary off any passive mode VGs
    # 
    if [[ $RG_DEPENDENCIES == "TRUE" ]] 
    then
        #
        :   If any volume groups were varied on in passive mode when this node
        :   came up, all the prior resource group processing would have left them
        :   in passive mode.  Completely vary them off at this point.
        #
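        #
        # Illustrative only: a volume group varied on in passive mode shows
        # "passive-only" in its 'lsvg <vg name>' VG PERMISSION field, which is
        # what the grep below looks for.
        #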
	INACTIVE_VGS=$(lsvg -L | grep -w -v -x -E $(lsvg -L -o | paste -s -d'|' - ) )
	for vg in $INACTIVE_VGS
	do
	    if LC_ALL=C lsvg -L $vg 2>/dev/null | grep -i -q 'passive-only'
	    then
		#
		:   Reset any read only fence height prior to vary off
		#
		cl_set_vg_fence_height -c $vg rw
		RC=$?
		if (( $RC != 0 ))
		then
		    #
		    :   cl_set_vg_fence_height -c $vg rw return code is $RC
		    :   Log any error, but continue.  If this is a real problem, the varyoffvg will fail
		    #
		    rw=$(dspmsg -s 103 cspoc.cat 350 'read only,read/write' | cut -f2 -d,)
		    cl_log 10511 "$PROGNAME: Volume group $vg fence height could not be set to read/write" $PROGNAME $vg $rw
		fi
		
		#
		:   'lsvg <vg name>' will show if a volume group is varied
		:   on in passive mode.  Any such are varied off
		#
		cltime
		varyoffvg $vg
		RC=$?
		cltime
		: rc_varyoffvg = $RC
		#
		:  Force a timestamp update to get timestamps in sync
		:  since timing may prevent LVM from doing so
		#
		cl_update_vg_odm_ts -o -f $vg
		
		#
		:   If VG fencing is in place, restore the fence height to read/only.
		#
		cl_set_vg_fence_height -c $vg ro
		RC=$?
		:   return code from volume group fencing is $RC
		if (( 0 != $RC ))
		then
		    #
		    :   Log any error, but continue. Manual intervention may be needed.
		    #
		    ro=$(dspmsg -s 103 cspoc.cat 350 'read only,read/write' | cut -f1 -d,)
		    cl_log 10511 "$PROGNAME: Volume group $vg fence height could not be set to read/only" $PROGNAME $vg $ro
		fi
	    fi
	done
    fi

    #
    :   remove the flag file used to indicate reconfig_resources
    #
    rm -f /usr/es/sbin/cluster/etc/.hacmp_wlm_config_changed

    #
    :	Run WLM stop script 
    #
    cl_wlm_stop


    # The return value will indicate an error, if one occurred,
    # but we don't really care, so there is no reason to check it.
    
fi

if [[ $NODENAME == $LOCALNODENAME ]]
then
    #################################################################
    :	Node is down: Create the lock file that inhibits node halt 
    #################################################################
    /bin/touch $NODE_HALT_CONTROL_FILE;
fi 

#
:   If this is the last node to leave, restore read/write access to all volume groups
#
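#
# POST_EVENT_MEMBERSHIP, set by the Cluster Manager, lists the nodes still
# active after this event; an empty value below means this node is the last
# one to leave the cluster.
#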
if [[ $PARAM != "forced" ]]
then
    if [[ -z $POST_EVENT_MEMBERSHIP ]]
    then
        #
        :   The last node out turns off fencing on all nodes
        #
        node_down_vg_fence_term
	
        #
        :   Clear the SCSIPR reservation and registration
        #
        typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge)
        if [[ $SCSIPR_ENABLED == Yes ]]
        then
            # Remove the reservation and registration
            node_down_scsipr_term

            # Clear reservation and registration from raw disks
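            # Illustrative only: the DISK value parsed here is expected to be a
            # quoted, space-separated list of PVIDs, e.g. DISK="<pvid1> <pvid2>",
            # so the quotes are stripped and each PVID is mapped to its hdisk.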
            typeset diskList=$(cllsres | grep -w DISK | cut -f2 -d=)
            if [[ -n $diskList ]]
            then
                diskList=${diskList#\"}
                diskList=${diskList%\"}
                typeset pvid=""
                for pvid in $diskList
                do
                    typeset hdisk=$(lspv -L | grep -w $pvid | awk '{print $1}')
                    if [[ -n $hdisk ]]
                    then
                        clpr_clear $hdisk
                    fi
                done
            fi
            diskList=$(cllsres | grep -w RAW_DISK | cut -f2 -d=)
            if [[ -n $diskList ]]
            then
                diskList=${diskList#\"}
                diskList=${diskList%\"}
                typeset uuid=""
                for uuid in $diskList
                do
                    typeset hdisk=$(lspv -u | grep -w $uuid | awk '{print $1}')
                    if [[ -n $hdisk ]]
                    then
                        clpr_clear $hdisk
                    fi
                done
            fi
        fi
    else
        if [[ $NODENAME == $LOCALNODENAME ]]
        then
            #
            : Node is gracefully going down.
            #
            typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge)
            if [[ $SCSIPR_ENABLED == Yes ]]
            then
                #
                : SCSIPR Disk Fencing is enabled.
                : Remove the registration of this node from all the VGs that are part of HACMPresource.
                #
                typeset VGs=$(print $(lsvg -L 2> /var/hacmp/log/${PROGNAME}.LSVG.ERR | egrep -vw 'rootvg|caavg_private'))
                [[ -e /var/hacmp/log/${PROGNAME}.LSVG.ERR && ! -s /var/hacmp/log/${PROGNAME}.LSVG.ERR ]] && rm /var/hacmp/log/${PROGNAME}.LSVG.ERR
                typeset VolGrp=""
                for VolGrp in $VGs
                do
                    typeset resgrp=$(clodmget -q "name like '*VOLUME_GROUP' and value = $VolGrp" -f group -n HACMPresource)
                    if [[ -n $resgrp ]]
                    then
                        # Remove registration of the $LOCALNODENAME
                        clpr_removeReg_vg $VolGrp
                    fi
                done
                # Remove registrations and reservations from raw disks 
                typeset diskList=$(cllsres | grep -w DISK | cut -f2 -d=)
                if [[ -n $diskList ]]
                then
                    diskList=${diskList#\"}
                    diskList=${diskList%\"}
                    typeset pvid=""
                    for pvid in $diskList
                    do
                        typeset hdisk=$(lspv -L | grep -w $pvid | awk '{print $1}')
                        if [[ -n $hdisk ]]
                        then
                            clpr_removeReg $hdisk
                        fi
                    done
                fi
                diskList=$(cllsres | grep -w RAW_DISK | cut -f2 -d=)
                if [[ -n $diskList ]]
                then
                    diskList=${diskList#\"}
                    diskList=${diskList%\"}
                    typeset uuid=""
                    for uuid in $diskList
                    do
                        typeset hdisk=$(lspv -u | grep -w $uuid | awk '{print $1}')
                        if [[ -n $hdisk ]]
                        then
                            clpr_removeReg $hdisk
                        fi
                    done
                fi
            fi
        fi
    fi

    #
    :   refresh clcomd, FWIW
    #
    refresh -s clcomd

fi

#
:   Log the final status of all resource groups
#
clRGinfo -p -t 2>&1

return $STATUS
