#!/bin/ksh
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2017,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/events/network_up_complete.sh 1.1.3.1 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1990,2014 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/events/network_up_complete.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

#########################################################################
#
#   COMPONENT_NAME: EVENTS
#
#   FUNCTIONS: none
#
#########################################################################

#########################################################################
#                                                                       #
#       Name:           network_up_complete                             #
#                                                                       #
#	Description:	This event script is called when the previously #
#			down network becomes completely available.	#
#									#
#       Called by:      cluster manager                                 #
#                                                                       #
#       Calls to:       None                                            #
#                                                                       #
#       Arguments:      nodename network_name                     	#
#                                                                       #
#                       nodename - id of the node whose network resumes #
#                                functioning.                           #
#                                                                       #
#                       network_name - logical network name from        #
#                                      cluster configuration.           #
#                                                                       #
#       Returns:        0       success                                 #
#                       1       failure                                 #
#                       2       bad argument                            #
#                                                                       #
#########################################################################

# Script name (basename of $0) for logging and usage messages.
typeset PROGNAME=${0##*/}
# Build PATH from the cluster utility helper so all HA commands resolve.
# NOTE: must happen before sourcing cl_amlib below.
export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
# Including Availability metrics library file
# (provides amlog_trace and the AM_NETWORK_UP_* markers used later).
. /usr/es/lib/ksh93/availability/cl_amlib
# Auto-export every variable assigned by the cllsparam output
# (e.g. VERBOSE_LOGGING); set +a restores normal assignment semantics.
set -a
eval $(cllsparam -n $LOCALNODENAME)
set +a

# Enable command tracing when the cluster is configured for high verbosity.
if [[ $VERBOSE_LOGGING == "high" ]]; then
    set -x
    version='%I%'
fi


# Exactly two arguments are required: nodename and network_name.
if (( $# != 2 )); then
    cl_echo 1033 "Usage: $PROGNAME nodename network_name\n"  $PROGNAME
    exit 2
fi

# Record the availability-metrics BEGIN marker when the event is local.
[[ $1 == $LOCALNODENAME ]] && amlog_trace $AM_NETWORK_UP_COMPLETE_BEGIN "$EVENT_SERIAL_NUMBER|$1|$2"

# Name the positional arguments; NETWORKNAME is exported for child scripts.
NODENAME=$1
NETWORK=$2
export NETWORKNAME=$NETWORK

# Default to real execution unless the caller requested emulation ("EMUL").
EMULATE=${EMULATE:-"REAL"}
set -u

#
# This will be the exit status seen by the Cluster Manager.
# All lower-level scripts should pass status back to the caller.
#
STATUS=0

# If this is a two node cluster and exported filesystems exist, then when the
# cluster topology is stable notify rpc.statd of the changes.
# Node count: one 'name =' stanza per node in the HACMPnode ODM class
# (sort -u replaces the redundant sort | uniq pipeline).
if [ 2 -eq $(odmget HACMPnode | grep 'name =' | sort -u | wc -l) ]
then
    RESOURCE_GROUPS=$(odmget HACMPgroup | grep 'group =' | awk '{print $3}' | sed 's/"//g')
    for group in $RESOURCE_GROUPS
    do
        # Does this resource group export any filesystems over NFS?
        EXPORTLIST=$(odmget -q "group=$group AND name=EXPORT_FILESYSTEM" HACMPresource \
            | grep value | awk '{print $3}' | sed 's/"//g')
        if [ -n "$EXPORTLIST" ]
        then
            if [ "$EMULATE" = "EMUL" ]
            then
                # Emulation mode: report what would have run, do nothing.
                cl_echo 3020 "NOTICE >>>> The following command was not executed <<<< \n"
                echo "cl_update_statd\n"
            else
                # Test the command directly instead of inspecting $? afterward.
                if ! cl_update_statd
                then
                    cl_log 1074 "$PROGNAME: Failure occurred while processing cl_update_statd.\n" $PROGNAME
                    STATUS=1
                fi
            fi
            # One statd notification covers the cluster; stop after the first
            # group found with exports.
            break
        fi
    done
fi

# On the local node only: if the network that came up uses IP aliasing,
# persistent IP labels may need to be re-aliased onto it.
if [[ "$NODENAME" == "$LOCALNODENAME" ]]; then
    # HACMPnetwork 'alias' attribute: "1" means an aliasing network.
    ALIASING=$(odmget -q"name=$NETWORK" HACMPnetwork | awk '$1 == "alias" {print $3}' | sed 's/"//g')
    [[ $ALIASING == "1" ]] && cl_configure_persistent_address aliasing_network_up -n $NETWORK
fi

#
# Call replicated resource net-initialization methods
#

METHODS=$(cl_rrmethods2call net_initialization)

for method in $METHODS
do
  if [[ -x $method ]]
  then
      # Pass the original event arguments through unmodified; "$@" keeps each
      # argument as a separate word, whereas the previous unquoted $* would
      # re-split any argument containing whitespace.
      if ! $method "$@"
      then
        STATUS=1
      fi
  fi
done


#
# This is the template script for network_up_complete event,
# and is locally configurable.
#

#
# Possible NFS re-mount
#
# Any resource group with MOUNT_FILESYSTEM configured means NFS cross
# mounts exist somewhere in the cluster configuration.
CROSSMOUNTS=$(clodmget -n -q "name=MOUNT_FILESYSTEM" -f group  HACMPresource)

if [ -n "${CROSSMOUNTS}" -a "${NODENAME}" = "${LOCALNODENAME}" ]
then
    #
    : Remount any NFS cross mount if required 
    #
    # Only execute this code, if the network_up_complete event
    # is for the local node and if there are cross mounts in 
    # HA config

    RESOURCE_GROUPS=$(clodmget -n -f group HACMPgroup )
    for group in $RESOURCE_GROUPS
    do

        # Skip the resource group, if
        # - it has no NFS mounts
        # - the local node is not part of the RG
        # - it is not ONLINE (-> Set NFS_HOST to node where the RG is ONLINE)
        # - a NFS_NETWORK is defined and the network_up_complete event is for another network

        # Set/check MOUNT_FILESYSTEM
        MOUNT_FILESYSTEM=$(clodmget -n -q "name=MOUNT_FILESYSTEM and group=${group}" -f value HACMPresource )
        [[ -z "${MOUNT_FILESYSTEM}" ]] && continue

        # Check if local node is in the RG
        IN_RG=false
        for node in $(clodmget -n -q "group=${group}" -f nodes HACMPgroup )
        do
            [[ "${node}" == "${LOCALNODENAME}" ]] && IN_RG=true
        done
        [[ "${IN_RG}" == "false" ]] && continue 

        # Check if RG is ONLINE and set  NFS_HOST to node where the RG is ONLINE
        NFS_HOST=$(clRGinfo -s ${group} | awk -F : '{ if ( $2 == "ONLINE" ) print $3 }')
        [[ -z "${NFS_HOST}" ]] && continue

        # Check if a NFS_NETWORK is defined. If yes, is it the network handled in this event?
        # NOTE(review): this skips the group when NFS_NETWORK *equals* the
        # event's network, while the skip-list comment above says to skip when
        # the event is for *another* network — one of the two looks inverted;
        # confirm intended behavior before changing.
        NFS_NETWORK=$(clodmget -n -q "name=NFS_NETWORK and group=${group}" -f value HACMPresource )
        if [ -n "${NFS_NETWORK}" -a "${NFS_NETWORK}" = "${NETWORK}" ]
        then
            continue
        fi

        # Build the list of candidate NFS server labels.  When an NFS_NETWORK
        # is configured, restrict candidates to the group's service labels on
        # that network (matched against cllsif output); otherwise try them all.
        NFSHOST=""
        NFSMOUNT_LABEL=""
        ALL_NFSMOUNT_LABEL=$(clodmget -n -q "group=${group} AND name=SERVICE_LABEL" -f value HACMPresource )
        if [ -n "${NFS_NETWORK}" ]
        then
            for label in ${ALL_NFSMOUNT_LABEL}
            do
                IN_NETWORK=$(cllsif -cS 2> /dev/null | grep :$NFS_NETWORK: | cut -d: -f1 | grep -x ${label} )
                if [ -n "${IN_NETWORK}" ]
                then
                    NFSMOUNT_LABEL="${NFSMOUNT_LABEL} ${label}"
                fi
            done
        else
            NFSMOUNT_LABEL=${ALL_NFSMOUNT_LABEL}      
        fi

        #
        # Select active service label
        #
        if [ -n "${NFSMOUNT_LABEL}" ]
        then
            # make sure to flush the arp cache for this label.
            for label in ${NFSMOUNT_LABEL} 
            do
                arp -d ${label}
                ping ${label} 1024 1 >/dev/null
                RC=$?
                : exit status of ping ${label} is: $RC
                # First label that answers ping becomes the NFS server address.
                if [ $RC -eq 0 ]
                then
                        NFSHOST=${label}
                        break
                fi
            done

            if [ -n "${NFSHOST}" ]
            then
                # cl_activate_nfs performs the actual cross mount for this RG.
                GROUPNAME=${group} cl_activate_nfs 1 ${NFSHOST} "${MOUNT_FILESYSTEM}"
                RC=$?
                #
                : exit status of cl_activate_nfs 1 $NFSHOST ${MOUNT_FILESYSTEM} is: $RC
                # Don't update STATUS variable with RC -> mount failure will not cause an event error
            else
                cl_log 653 "$PROGNAME: NFS Mounting failed. No reachable service interfaces found on node $NFS_HOST\n"  $PROGNAME $NFS_HOST
            fi
        fi

   done
fi
# Emit the availability-metrics END (or FAILURE) marker for local events,
# then hand the accumulated status back to the Cluster Manager.
if [[ $1 == $LOCALNODENAME ]]
then
    if (( STATUS != 0 ))
    then
        # Logging failure in clavailability.log
        amlog_trace $AM_NETWORK_UP_FAILURE "$EVENT_SERIAL_NUMBER|$1|$2"
    else
        amlog_trace $AM_NETWORK_UP_COMPLETE_END "$EVENT_SERIAL_NUMBER|$1|$2"
    fi
fi
exit $STATUS
