#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2021.  All rights reserved.
#
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/events/rg_move.sh 1.50.1.2 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1999,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG
# @(#)  7d4c34b 43haes/usr/sbin/cluster/events/rg_move.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

#================================================
# The following, commented line enforces coding
# standards when this file is edited via vim.
#================================================
# vim:tabstop=4:shiftwidth=4:expandtab:smarttab
#================================================

#########################################################################
#                                                                       #
#       Name:           rg_move                                         #
#                                                                       #
#       Description:    This event script is called when a resource     #
#                       group needs to move.                            #
#                       The script checks to see whether the nodename   #
#                       is the local node or not, then calls sub-event  #
#                       scripts to release resources (if this is the    #
#                       node from which the group is moving) or         #
#                       (possibly) acquire resources (if this is a      #
#                       different node).                                #
#                                                                       #
#       Called by:      cluster manager                                 #
#                                                                       #
#       Calls to:       node_down_local, node_up_local,                 #
#                       cllsparam, clsetenvgrp, clsetenvres,            #
#                       clodmget, cl_get_path, process_resources        #
#                                                                       #
#       Arguments:      nodename rg_ID [ACQUIRE | RELEASE]              #
#                                                                       #
#       Returns:        0       success                                 #
#                       1       failure                                 #
#                       2       bad argument                            #
#                                                                       #
#########################################################################

PROGNAME=${0##*/}
PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
export PATH

#
# Set the Run-Time Parameter values and export them
# to all successive scripts.
#
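# cllsparam prints the runtime parameters as NAME=value assignments;
# the eval below runs under allexport (set -a) so that each assignment
# is exported to all subsequently invoked scripts.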

#   The following lines create informative trace output
LOCAL_NODE=$(get_local_nodename)
set -a
    eval $(cllsparam -n $LOCAL_NODE)
set +a
if [[ $VERBOSE_LOGGING == high ]]; then
    set -x
    version='%I%'
fi

#
# This will be the exit status seen by the Cluster Manager.
# If STATUS is not 0, the Cluster Manager will enter reconfiguration.
# All lower-level scripts should pass status back to the caller.
# This allows each Resource Group to be processed individually,
# independent of the status of any other resource group.
#
STATUS=0

if [[ -z $EMULATE ]]
then
    EMULATE="REAL"
fi
# All the hard work is done in the sub scripts node_down_local and
# node_up_local, and they handle emulation themselves.  So the EMULATE
# variable is never referenced in this script.

set -u
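
# The prolog above documents a return code of 2 for a bad argument, so
# enforce that here before the positional parameters are referenced.
if (( $# < 2 ))
then
    print -u2 "Usage: $PROGNAME nodename rg_ID [ACQUIRE | RELEASE]"
    exit 2
fi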

export NODENAME=$1
RGID=$2
if (( $# == 3 ))
then
    ACTION=$3
else
    ACTION=""
fi

#
: serial number for this event is $EVENT_SERIAL_NUMBER
#

# Export a variable to help clmanageroha determine whether the release is asynchronous
export RG_UP_POSTEVENT_ON_NODE=$NODENAME

# Translate the resource group ID into a resource group name.

eval RGNAME=$(clodmget -q"id=$RGID" -f group -n HACMPgroup)

# rpc.statd is needed for NFS mounting; it should be updated
# whenever the NFS mounts change.
UPDATESTATD=0
export UPDATESTATD


# export variables for use later if lsvg -l or varyoffvg fail
export RG_MOVE_EVENT=true

# use the local nodename to calculate RG_MOVE_ONLINE. This is used
# later in scripts like cl_deactivate_vgs for calculating whether
# this rg_move is due to selective failover
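# As an illustration: for a group named rg1 on a node named node-a, this
# resolves a variable named RESGRP_rg1_<node>, where any dashes in the node
# name are replaced by HA_DASH_CHAR (dashes are not valid in variable names).
# Its value is the group state last recorded for this node; if no state was
# recorded, it defaults to TMP_ERROR below.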
group_state=\$RESGRP_${RGNAME}_${LOCAL_NODE//-/$HA_DASH_CHAR}
set +u
export RG_MOVE_ONLINE=$(eval print $group_state)
set -u
RG_MOVE_ONLINE=${RG_MOVE_ONLINE:-TMP_ERROR}


# Two marker files indicate that the NFS daemons have been stopped.  Make
# sure any stale marker files are removed, because at this point we assume
# the NFS daemons are already running:

rm -f /tmp/.NFSSTOPPED
rm -f /tmp/.RPCLOCKDSTOPPED

#
# Set the RESOURCE_GROUPS environment variable with the name(s)
# of all Resource Groups participating in this event, and export
# them to all successive scripts.
#
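# The clsetenvgrp output is captured in a variable first so that RC records
# the exit status of clsetenvgrp itself; the eval then applies the generated
# assignments while allexport (set -a) is in effect.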
set -a
    clsetenvgrp_output=$(clsetenvgrp $NODENAME $PROGNAME $RGNAME)
    RC=$?
    eval "$clsetenvgrp_output"
set +a

# If clsetenvgrp fails, or if RGNAME is not set properly, fail the script

if (( $RC != 0 )) || [[ -z $RGNAME ]]
then
    #
    # If clsetenvgrp fails, then there is something seriously wrong with
    # the cluster manager.  Exit immediately with a failure status.
    #
    exit 1
fi

if [[ -z "$RG_DEPENDENCIES" ]]
then
    RG_DEPENDENCIES="FALSE"
fi

#
# For each participating resource group, serially process the resources
#
for group in $RESOURCE_GROUPS
do
    #
    # All sub-scripts must inherit the same environment
    # values which were set by this script (set -a).
    # e.g.: all sub-scripts must inherit VERBOSE_LOGGING value.
    #
    set -a
        eval $(clsetenvres $group $PROGNAME)
    set +a
    export GROUPNAME=$group

    #
    # Check nodename, and call node_down_local or node_up_local accordingly.
    #
    # This looks odd, but only because the script names reflect how
    # they were originally called, not what they do.  For example,
    # reconfig_resource_release also calls node_down_local, and
    # reconfig_resource_acquire also calls node_up_local.
    #
    # node_down_local takes a resource group (implicitly, through
    # an environment variable) and releases its resources.
    # Similarly, node_up_local takes a resource group implicitly,
    # and acquires its resources.  NFS remounting and the starting of
    # applications wait for the _complete event.
    #

    if [[ $ACTION == "" ]]
    then
        # rg_move was called during migration, so ACTION was not specified.
        # Get it from PRINCIPAL_ACTION
        ACTION="$PRINCIPAL_ACTION"
    fi

    if [[ $ACTION == "RELEASE" ]]
    then
        if [[ $PRINCIPAL_ACTION  == "RELEASE" || \
              $ASSOCIATE_ACTION  == "UMOUNT"  || \
              $AUXILLIARY_ACTION == "RELEASE_SECONDARY" ]]
        then
            clcallev node_down_local
        fi
    elif [[ $ACTION == "ACQUIRE" ]]
    then 
        if [[ $PRINCIPAL_ACTION  == "ACQUIRE" || \
              $AUXILLIARY_ACTION == "ACQUIRE_SECONDARY" ]]
        then
            MOUNT_FILESYSTEM="" # will acquire NFS mounts in rg_move_complete
            clcallev node_up_local
        fi
    fi

    # If ANY failure has occurred, record it so this script exits accordingly
    if (( $? != 0 ))
    then
        cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
        STATUS=1
    fi

    UPDATESTATD=1  # the statd should be updated only once per node
done

#
# Process_Resources for cluster manager-driven actions
#
# AM_SYNC_CALLED_BY indicates whether cl_sync_vgs is being called as
# part of an rg_move event.  This is used when logging VG sync
# activity in clavailability.log.
#
export AM_SYNC_CALLED_BY="RG_MOVE"
if ! process_resources
then
    STATUS=1
fi
#
: unsetting AM_SYNC_CALLED_BY from the caller environment since
: it is not required after this point in execution
#
unset AM_SYNC_CALLED_BY
# Restart the NFS daemons as needed, so that the mounts can be
# reacquired in rg_move_complete.
if [[ -f /tmp/.NFSSTOPPED ]]
then
    rm -f /tmp/.NFSSTOPPED
    ODMDIR=/etc/objrepos chnfs -g on -x 1

    startsrc -s nfsd
    rcstartsrc=$?
    if (( $rcstartsrc != 0 ))
    then
        : rc_startsrc.nfsd = $rcstartsrc
    fi

    startsrc -s rpc.mountd
    rcstartsrc=$?
    if (( $rcstartsrc != 0 ))
    then
        : rc_startsrc.rpc.mountd = $rcstartsrc
    fi

fi

if [[ -f /tmp/.RPCLOCKDSTOPPED ]]
then
    rm -f /tmp/.RPCLOCKDSTOPPED
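    # Give rpc.lockd up to 60 seconds to finish stopping before
    # attempting to restart it.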
    integer ATTEMPT=0
    while (( ATTEMPT++ < 60 ))
    do
        : rpc.lockd status check, attempt $ATTEMPT
        LC_ALL=C lssrc -s rpc.lockd | grep stopping
        (( $? == 0 )) && sleep 1 || break
    done

    startsrc -s rpc.lockd
    rcstartsrc=$?
    if (( $rcstartsrc != 0 ))
    then
        : rc_startsrc.rpc.lockd = $rcstartsrc
    fi
fi

exit $STATUS
