#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2020,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/events/node_up_complete.sh 1.1.13.1 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1990,2013 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG

# @(#)  7d4c34b 43haes/usr/sbin/cluster/events/node_up_complete.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

#########################################################################
#                                                                       #
#     Name:       node_up_complete                                      #
#                                                                       #
#     Description:  This script is called when the node_up script       #
#             successfully completes.                                   #
#             The script checks the name of the node, then              #
#             calls one of the two sub-event scripts                    #
#             accordingly.                                              #
#                                                                       #
#     Called by:    cluster manager                                     #
#                                                                       #
#     Calls to:     node_up_local_complete, node_up_remote_complete     #
#                                                                       #
#     Arguments:    nodename and start mode                             #
#                                                                       #
#     Returns:    0     success                                         #
#             1     failure                                             #
#             2     bad argument                                        #
#                                                                       #
#########################################################################

#########################################################################
# Produces the union of SIBLING_ACQUIRING_GROUPS and
# SIBLING_RELEASING_GROUPS, removing any duplicate group names
#########################################################################
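# Example (illustrative only): with SIBLING_ACQUIRING_GROUPS="rg1 rg2"
# and SIBLING_RELEASING_GROUPS="rg2 rg3", this prints rg1, rg2 and rg3,
# one group name per line.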
listSiblingGroups () {
    typeset PS4_FUNC="listSiblingGroups"
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    
    for group in $SIBLING_ACQUIRING_GROUPS $SIBLING_RELEASING_GROUPS
    do
        echo $group
    done | sort -u
    
    return
}

#########################################################################
#
# Main Starts Here
#
#########################################################################

typeset PROGNAME=${0##*/}
export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"

if [[ $VERBOSE_LOGGING == "high" ]]; then
    set -x
    version='%I%'
fi

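# cllsparam -n <node> writes the node's run-time parameters to stdout as
# NAME=value assignments (e.g. VERBOSE_LOGGING="high"; illustrative only).
# While "set -a" is in effect, every variable assigned by the eval is
# automatically exported, so these settings propagate to all child scripts.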
set -a
eval $(cllsparam -n $LOCALNODENAME)
set +a

NODENAME=$1
typeset -i RC=0
typeset -i UPDATESTATD=0

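# Live Partition Mobility (LPM) tracking: the directory below holds lpm_*
# flag files while an LPM operation is in progress, and the state file
# records (via its In_progress_file entry) which flag file is in use.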
typeset LPM_IN_PROGRESS_DIR="/var/hacmp/.lpm_in_progress"
typeset LPM_IN_PROGRESS_PREFIX="lpm"
typeset STATE_FILE="/var/hacmp/cl_dr.state"

#
# This will be the exit status seen by the Cluster Manager.
# If STATUS is not 0, the Cluster Manager will enter reconfiguration.
# All lower-level scripts should pass status back to the caller.
# This allows each Resource Group to be processed individually,
# independent of the status of the other resource groups.
#
STATUS=0

set -u

if (( $# < 1 ))
then
  echo "Usage: $PROGNAME nodename [mode]" >&2
  exit 2
fi

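# An optional second argument of "forced" requests a forced start; in that
# case the resource group processing below is skipped.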
typeset START_MODE=""
if (( $# > 1 )) && [[ $2 == "forced" ]]
then
    START_MODE="forced"
fi

#
: serial number for this event is $EVENT_SERIAL_NUMBER
#


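# /tmp/.RPCLOCKDSTOPPED is a flag file, presumably left by an earlier event
# script that stopped rpc.lockd; note it and remove it so that rpc.lockd
# can be restarted once rpc.statd has been brought up to date below.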
typeset -i RPCLOCKDSTOPPED=0
if [[ -f /tmp/.RPCLOCKDSTOPPED ]]
then
  RPCLOCKDSTOPPED=1
  rm -f /tmp/.RPCLOCKDSTOPPED
fi

# If this is a two-node cluster and exported filesystems exist, then once
# the cluster topology is stable, notify rpc.statd of the changes.
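# clnodename lists the cluster node names, one per line; clodmget queries
# the HACMP ODM classes (HACMPgroup, HACMPresource) directly.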
if (( 2 == $(clnodename | wc -l) ))
then
    RESOURCE_GROUPS=$(clodmget -f group -n HACMPgroup)
    for group in $RESOURCE_GROUPS
    do
        EXPORTLIST=$(clodmget -q "group=$group AND name=EXPORT_FILESYSTEM" -f value -n HACMPresource)
        if [[ -n "$EXPORTLIST" ]]
        then
            UPDATESTATD=1
	    if [[ "$NODENAME" == "$LOCALNODENAME" ]]
	    then
	        # If node coming up is the local node, make sure rpc.statd is started.
	        LC_ALL=C lssrc -s rpc.statd | grep inoperative
	        if (( $? == 0 ))
	        then
		    startsrc -s rpc.statd
                    : exit status of startsrc -s rpc.statd is: $?
	        fi
	    fi
            cl_update_statd
            if (( $? )) ; then
                cl_log 1074 "$PROGNAME: Failure occurred while processing cl_update_statd.\n" $PROGNAME
                STATUS=1
            fi
            break # already started on this node -- no need to check further
        fi
    done
fi

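# rpc.lockd was stopped earlier (flag file above).  If rpc.statd state was
# just updated, stop rpc.lockd once more before restarting it; in either
# case, wait up to 60 seconds for rpc.lockd to leave the "stopping" state,
# then restart it.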
if (( $RPCLOCKDSTOPPED ))
then
    if (( $UPDATESTATD ))
    then
        stopsrc -s rpc.lockd
    fi
    integer COUNT=60
    while (( COUNT > 0 ))
    do
        LC_ALL=C lssrc -s rpc.lockd | grep stopping
        if (( $? == 0 ))
        then
            (( COUNT-- ))
            sleep 1
        else
            break
        fi
    done
    startsrc -s rpc.lockd
    : exit status of startsrc -s rpc.lockd is: $?
fi

# If RG_DEPENDENCIES is set to FALSE by the cluster manager,
# resource groups will be processed here via clsetenvgrp.
if [[ "$RG_DEPENDENCIES" == "FALSE" && $START_MODE != forced ]]
then

    #
    # Set the RESOURCE_GROUPS environment variable with the name(s)
    # of all Resource Groups participating in this event, and export
    # them to all successive scripts.
    #
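    # clsetenvgrp writes NAME=value assignments to stdout; the eval below is
    # expected to set, among others, RESOURCE_GROUPS (e.g.
    # RESOURCE_GROUPS="rg1 rg2"; illustrative only).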
    set -a
    eval $(clsetenvgrp $NODENAME $PROGNAME)
    RC=$?
    set +a
    : exit status of clsetenvgrp $NODENAME $PROGNAME is: $RC
    if (( $RC ))
    then
        STATUS=1
    fi

    #
    # For each participating resource group, serially process the resources
    #
    for group in $RESOURCE_GROUPS
    do
        #
        # All sub-scripts must inherit the same environment
        # values which were set by this script (set -a).
        # e.g.: all sub-scripts must inherit VERBOSE_LOGGING value.
        #
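        # clsetenvres likewise emits NAME=value assignments describing the
        # resources of $group; the exact variable names depend on the
        # resources configured for the group.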
        set -a
        eval $(clsetenvres $group $PROGNAME)
        set +a
        export GROUPNAME=$group
            
        #
        # Check nodename, and call node_up_local_complete or 
        # node_up_remote_complete accordingly.
        #
        if [[ "$NODENAME" == "$LOCALNODENAME" ]]
        then
	    clcallev node_up_local_complete
            RC=$?
            : exit status of node_up_local_complete is: $RC
        else
	    clcallev node_up_remote_complete $NODENAME
            RC=$?
            : exit status of node_up_remote_complete is: $RC
        fi
        
        # If any failure has occurred, this script should exit accordingly
        if (( $RC ))
        then
            cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required.\n" $PROGNAME $group
            STATUS=1
        fi
    
    done

    #
    # Run process_resources to handle resource groups processed in parallel
    #
    process_resources
    RC=$?
    : exit status of process_resources is: $RC
    if (( $RC ))
    then
        STATUS=1
    fi
fi

# Refresh clcomd
refresh -s clcomd
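# (refresh -s asks the System Resource Controller to have clcomd, the
# cluster communications daemon, reread its configuration.)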

: This is the final clRGinfo output

clRGinfo -p -t 2>&1
 
# During an LPM (Live Partition Mobility) operation, if another node that
# was down when the LPM was initiated comes up, create the LPM temp file
# on that node to indicate that an LPM is still in progress.
if (( $STATUS == 0 ))
then
   if [[ "$NODENAME" != "$LOCALNODENAME" ]]
   then
        lpm_in_progress_file=$(grep -w "In_progress_file" $STATE_FILE 2>/dev/null | cut -d'=' -f2)
        lpm_in_progress_prefix=$(ls $LPM_IN_PROGRESS_DIR/${LPM_IN_PROGRESS_PREFIX}_* 2>/dev/null)
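        # lpm_in_progress_file holds the flag-file path recorded in the
        # state file; lpm_in_progress_prefix holds whatever lpm_* flag
        # file is actually present locally (empty if none).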
        if [[ -n $lpm_in_progress_prefix ]]
        then
            # If the node_up_complete event is being run for the same node
            # where the LPM is in progress, do not create an LPM temp file
            # with another node's name.
            if [[ "$lpm_in_progress_file" == "$lpm_in_progress_prefix" ]]
            then  
                cl_rsh $NODENAME "/usr/bin/mkdir -p $LPM_IN_PROGRESS_DIR 2>/dev/null; /usr/bin/touch $lpm_in_progress_file 2>/dev/null;"
                RC=$?
                if (( $RC )); then
                    STATUS=1
                fi
                : cl_rsh to node $NODENAME completed with exit status: $RC
            fi
        fi
   fi
fi 
exit $STATUS
