#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/events/node_down.sh 1.67.1.4 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1990,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG
# @(#)  7d4c34b 43haes/usr/sbin/cluster/events/node_down.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

#########################################################################
#                                                                       #
#       Name:           node_down                                       #
#                                                                       #
#       Description:    This event script is called when a node         #
#                       leaves the cluster.                             #
#                       The script checks to see whether the nodename   #
#                       is the local node or not, then calls sub-event  #
#                       scripts accordingly.                            #
#                                                                       #
#       Called by:      cluster manager                                 #
#                                                                       #
#       Calls to:       node_down_local, node_down_remote               #
#                                                                       #
#       Arguments:      nodename [graceful | forced]                    #
#                                                                       #
#       Returns:        0       success                                 #
#                       1       failure                                 #
#                       2       bad argument                            #
#                                                                       #
#########################################################################


#########################################################################
#
:     node_down Starts Here
#
#########################################################################

PROGNAME=${0##*/}
export PATH=$(/usr/es/sbin/cluster/utilities/cl_get_path all)

#
#   Pull the per-node cluster runtime parameters into the environment.
#   'set -a' auto-exports every variable the eval'd cllsparam output assigns.
#
set -a
eval $(cllsparam -n $LOCALNODENAME)
set +a

if [[ $VERBOSE_LOGGING == "high" ]]
then
    set -x
    version='%I%'
fi

# $1 = name of the node leaving the cluster; $2 = optional shutdown mode
export NODENAME=$1
export PARAM=$2

#
#   All lower level scripts should pass status back to the caller.
#   This will allow Resource Groups to be processed individually,
#   independent of the status of another resource group.
#
integer STATUS=0

#   Set by the cluster manager when node_down is invoked because AIX itself
#   is shutting down; default to "false" when not supplied.
AIX_SHUTDOWN=${AIX_SHUTDOWN:-"false"}

#   From this point on, referencing an unset variable is a hard error
set -u

#   At least the node name is required; the mode argument is optional
if (( $# < 1 )) ; then
  cl_echo 1034 "Usage: $PROGNAME nodename [graceful | forced]\n" $PROGNAME
  exit 2
fi

#
: serial number for this event is $EVENT_SERIAL_NUMBER
#

#
:   Clean up NFS state tracking
#
#   Remove stale markers from earlier events: the flag recording that
#   rpc.lockd was stopped, and the flag recording that rpc.statd was
#   already updated.  UPDATESTATD tracks the latter for this event.
UPDATESTATDFILE="/usr/es/sbin/cluster/etc/updatestatd"
rm -f /tmp/.RPCLOCKDSTOPPED
rm -f $UPDATESTATDFILE
UPDATESTATD=0
export UPDATESTATD

#
:   For RAS debugging, the result of ps -edf is captured at this time
#
#   The full process list lands in the event log (hacmp.out) so support
#   can see what was running on the node when this event fired.
: begin ps -edf
  ps -edf
: end ps -edf

#
:   If RG_DEPENDENCIES is not false, all RG actions are taken via rg_move events.
#
#   Serial, per-group processing below only happens when dependencies are
#   disabled AND the node is not being forced down.
if [[ $PARAM != "forced" && $RG_DEPENDENCIES == FALSE ]]
then
    #
    :	Set RESOURCE_GROUPS to all RG names participating in this event
    #
    #   clsetenvgrp emits variable assignments; 'set -a' exports them all
    set -a
    eval $(clsetenvgrp $NODENAME $PROGNAME $PARAM)
    RC=$?
    set +a
    : exit status of clsetenvgrp $NODENAME $PROGNAME $PARAM is: $RC
    if (( $RC != 0 )) ; then
	STATUS=1
    fi

    #
    :	Process_Resources for parallel-processed resource groups
    :	If RG_DEPENDENCIES is true, then this call is responsible for
    :	starting the necessary rg_move events.
    #
    if ! process_resources
    then
        STATUS=1
    fi

    if [[ -f $UPDATESTATDFILE ]]
    then
        #
        :   since rpc.statd got updated during process_resources, dont do it again
        #
        UPDATESTATD=1
        rm -f $UPDATESTATDFILE
    fi

    #
    :	For each participating RG, serially process the resources
    #
    #   Each iteration loads that group's resource definitions into the
    #   environment via clsetenvres before dispatching the sub-event.
    for group in $RESOURCE_GROUPS
    do
	set -a
	eval $(clsetenvres $group $PROGNAME)
	set +a
	export GROUPNAME=$group
	
	#
	:   call node_down_local or node_down_remote accordingly.
	#
        typeset RETURN_VALUE
	if [[ $NODENAME == $LOCALNODENAME ]] ; then
	    clcallev node_down_local
            RETURN_VALUE=$?
            : exit status of node_down_local is: $RETURN_VALUE
	else
	    clcallev node_down_remote $*
            RETURN_VALUE=$?
            : exit status of node_down_remote is: $RETURN_VALUE
	fi
	
	#
	:   If ANY failure has occurred, this script should exit accordingly
	#
	#   A failure is logged and remembered in STATUS, but the loop
	#   continues so remaining groups are still processed.
	if (( $RETURN_VALUE != 0 ))
  	then
	    cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
	    STATUS=1
	fi
	UPDATESTATD=1
	
    done

    if [[ -f /tmp/.RPCLOCKDSTOPPED ]]
    then
	#
	:   rpc.lockd for NFS2/3 was previously told to stop.
	:   Wait up to 60 seconds for it to stop completely.
	#
	#   Poll SRC once per second; leave the loop as soon as the
	#   subsystem is no longer reported as "stopping".
	rm -f /tmp/.RPCLOCKDSTOPPED
	for (( count=0 ; count<60 ; count++ ))
	do
	    if LC_ALL=C lssrc -s rpc.lockd | grep -i stopping
	    then
		sleep 1
	    else
		#
		:   No longer in 'stopping' state, so presumably stopped
		#
		break
	    fi
	done
	#
	:   Now that rpc.lockd is completely stopped, restart it
	#
	lssrc -s rpc.lockd
	startsrc -s rpc.lockd
	: exit status of startsrc -s rpc.lockd is: $?
    fi
fi			    #	RG_DEPENDENCIES=false

#
:  Processing specific to the local node
#
if [[ $NODENAME == $LOCALNODENAME ]]  
then
    #
    :	 Stopping cluster services on $NODENAME with the $PARAM option
    #
    if [[ $PARAM != "forced" ]]
    then 
	#
	#   If a VG is in passive mode, it will not show up in "'lsvg -o'".
	#   So: subtract the online VG names (lsvg -o) from all known VG
	#   names (lsvg) to find the candidates for passive-mode varyoff.
	#
	INACTIVE_VGS=$(lsvg -L | grep -w -v -x -E $(lsvg -L -o | paste -s -d'|' - ) )

        #
        if [[ -n $INACTIVE_VGS ]]
        then
		:   Found inactive VGs. For those that are online in passive
                :   mode, remove any read only fencing, then varyoff
        fi
	for vg in $INACTIVE_VGS 
	do
	    # NOTE(review): PS4_LOOP presumably tags the xtrace (set -x)
	    # prompt with the current VG name — confirm against PS4 setup
	    PS4_LOOP="$vg"
	    if LC_ALL=C lsvg -L $vg 2>/dev/null | grep -i -q 'passive-only' 
	    then
		#
		#   This VG is online in passive mode, varyoff
		#   First remove any read only fencing, to allow varyoff 
		#
		cl_set_vg_fence_height -c $vg rw
		RC=$?
		if (( 0 != $RC ))
		then
                    #
                    #   Manual intervention may be needed.
                    #
                    echo "$PROGNAME: Volume group $vg fence height could not be set to read/write" 
		fi
		#   Timestamp the varyoff so its duration shows in the log
		cltime
		varyoffvg $vg
		RC=$?
		cltime
		: varyoffvg $vg returned $RC
                
		#
		:   Force an update to get the LVM time stamps in sync
		#   since timing may prevent LVM from doing so.
		#
		cl_update_vg_odm_ts -o -f $vg
		
		#
		:   Try to set the fence height to read/only for $vg
		#
		cl_set_vg_fence_height -c $vg ro
		RC=$?
		:   return code from volume group fencing is $RC
		if (( 0 != $RC ))
		then
                    #
                    #   Log any error, but continue. Manual intervention may be needed.
                    #
                    echo "$PROGNAME: Volume group $vg fence height could not be set to read/only" 
		fi
	    fi
	done
	unset PS4_LOOP
    fi

    #
    :	update the location DB to indicate this node is going down
    #
    if ! clchdaemons -r -d clstrmgr_scripts -t resource_locator ; then
	echo "$PROGNAME: clchdaemons -r -d clstrmgr_scripts -t resource_locator FAILED"
	STATUS=1
    fi
else
    #
    :	$NODENAME, is not the local node, handle fencing for any VGs marked as "'CRITICAL'".
    #
    cl_fence_vg $NODENAME
fi
 
#
# If aix is being shutdown, the clstrmgr will call node_down directly (instead
# of going through the rp steps) so that we have a better chance of actually
# running the event before aix starts killing off processes and services.
# If this is the case, we want to call node_down_complete here.
# This must match AIX_SHUTDOWN_STRING from clstrmgr.h
#
if [[ -n $AIX_SHUTDOWN && $AIX_SHUTDOWN == "true" ]]
then
    #
    # Note that we background this with a short sleep - this is to allow
    # node_down to exit before node_down_complete runs
    #
    sleep 1 && clcallev node_down_complete $NODENAME $PARAM &
fi

# 0 = success; 1 = one or more resource groups or steps above failed
exit $STATUS
