#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2017,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#

#  @(#)  7d4c34b 43haes/lib/ksh93/ezupdate/Node_t.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

###############################
# Function list:
#  build
#  build_items_to_install
#  check_and_verify_hdisk
#  check_dir
#  check_if_cluster_services_running
#  check_nim_server
#  check_rgs_can_move
#  cli_cmd
#  clrsh_cmd
#  display
#  get_items_from_source
#  install_apply
#  install_preview
#  mount_nim_res
#  reboot
#  refresh
#  reject
#  rollback
#  rollback_reboot 
#  start
#  start_PowerHA
#  stop
#  umount_nim_res
###############################

typeset scriptname=${0##*/}

. ${EZU_LIB_DIR}/common
. ${EZU_LIB_DIR}/log


##############################################################
# This type to encapsulate Node object and functions
##############################################################
typeset -T Node_t=(
    typeset -h 'node name'                  name=""
    typeset -h 'host name'                  host_name=""
    typeset -h 'PowerHA version'            version=""
    typeset -h 'node version_number'        version_number=""
    typeset -h 'AIX version'                aix_version=""
    # Help text fixed: this field holds the CAA (bos.cluster.rte) level, not the AIX level.
    typeset -h 'CAA version'                caa_version=""
    typeset -h 'RSCT version'               rsct_version=""
    typeset -h 'PowerHA node state'         state=""
    typeset -h 'CAA state'                  caa_state=""
    typeset -h 'unsync changed'             unsynced_changes=""
    typeset -h 'cluster RG list'            rg_list=""
    typeset -h 'eligible RG list'           eligible_rgs=""
    typeset -h 'active RG list'             active_rgs=""
    typeset -h 'eligible concurrent VGs'    eligible_conc_vg=""
    typeset -h 'active concurrent VGs'      active_conc_vg=""
    typeset -h 'node localization'          localization=${NODE_LOCALIZATION.REMOTE}
    typeset -h 'NIM master'                 nim_master=""
    typeset -h 'NIM Resource location'      nim_res_loc=""
    typeset -h 'item list from source'      item_list=""
    typeset -h 'fileset list from source'   fileset_list=""
    typeset -h 'ifix list from source'      ifix_list=""
    typeset -h 'ifix label list from source'    ifix_label=""
    typeset -h 'fileset to update'          lpp_to_update=""
    typeset -h 'fileset to reject'          applied_list=""
    typeset -h 'ifix to install'            ifix_to_install=""
    typeset -h 'ifix to remove'             ifix_label_to_remove=""
    # Help text fixed: this is the list of ifix labels to install, not to remove.
    typeset -h 'ifix to install'            ifix_label_to_install=""
    typeset -h 'fileset to update locked by ifix'   install_locked_list=""
    typeset -h 'fileset to reject locked by ifix'   reject_locked_list=""
    typeset -h 'manage mode'                manage_mode=${MANAGE_MODE.NON_DISRUPTIVE}
    typeset -h 'reject manage mode'         reject_manage_mode=${MANAGE_MODE.NON_DISRUPTIVE}
    typeset -h 'stop mode'                  stop_mode="${MANAGE_MODE.NONE}"
    typeset -h 'ifix locking fileset to apply'      ifix_locking_apply=""
    typeset -h 'ifix locking fileset to reject'     ifix_locking_reject=""
    typeset -h 'hdisk name for rollback'        hdisk=""
    typeset -h 'bootlist for node'          bootlist=""
    typeset -h 'rsct requires powerha offline'  rsct_requires_powerha_offline="no"

    #####################################################################
    #
    # NAME: display
    #
    # FUNCTION:
    #     Display the current Node_t object
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #           Node_t
    #
    # RETURNS: none
    #
    # OUTPUT:
    #####################################################################
    function display {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        # Dump the most useful Node_t fields, one per line, for trace/log output.
        print -- " node"
        print -- "    name="${_.name}
        print -- "    host name="${_.host_name}
        print -- "    state="${_.state}
        print -- "    caa state="${_.caa_state}
        print -- "    version="${_.version}
        print -- "    version number="${_.version_number}
        print -- "    aix version="${_.aix_version}
        print -- "    caa version="${_.caa_version}
        print -- "    rsct version="${_.rsct_version}
        print -- "    node localization="${_.localization}
        print -- "    nim master="${_.nim_master}
        # Fixed: was ${_.eligible_rgs=} — the stray '=' made this an
        # assign-default expansion (${var=word}) instead of a plain read.
        print -- "    eligible RG list="${_.eligible_rgs}
        print -- "    active RG list="${_.active_rgs}
        print -- "    eligible concurrent VGs="${_.eligible_conc_vg}
        print -- "    active concurrent VGs="${_.active_conc_vg}
        print -- "    hdisk to be used to rollback="${_.hdisk}
        print -- "    bootlist for node="${_.bootlist}
    }

    #####################################################################
    #
    # NAME: build
    #
    # FUNCTION:
    #     Build the node object
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #     Any object Node_t should contain a valid name field.
    #     An Cluster_t object can get the node name and set it before to
    #     construct the object Node_t.
    #     The create discipline function cannot be used as it is automatically
    #     executed when the object is declared, and at this phase it does not
    #     know the node name.
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #           Cluster_t
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.PHA_CMD_ERROR - PowerHA command error
    #     RC.FAILURE       - a clmgr or other command failed
    #     RC.OK            - Success
    #
    # OUTPUT:
    #####################################################################
    function build {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        trap 'print -- "$CANNOT_INT_MSG"' INT

        typeset cmd=""
        typeset -i rc=${RC.UNKNOWN}
        typeset cmd_output=""
        typeset local_hostname=""
        typeset local_nodename=""

        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t build"

        # Get hostname
        cmd="/usr/bin/hostname"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        cmd_output=$($cmd 2>&1)
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.FAILURE}
        fi
        local_hostname=$cmd_output

        # Get local node name
        cmd="${TOOLDIR}/get_local_nodename"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        cmd_output=$($cmd 2>&1)
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.FAILURE}
        fi
        local_nodename=$cmd_output


        # See if _.name is already set and if so, fill in this Node_t
        # with the information for that node; otherwise default to the
        # local node.
        if [[ -z ${_.name} ]]
        then
            _.name=${local_nodename}
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.name=\"${_.name}\""

        # Since the ._name field was already filled in, we want to
        # fill in the Node_t for the specified _.name with the
        # information for that node. The hostname, state, version
        # and unsynced information can be taken from the local
        # nodes PowerHA configuration (ODM).
        # The version number and AIX level must be taken from the
        # specified node using remote commands
        tmp_name=${_.name}    # avoid . in sed search string
        log_trace 0 "$0()[$LINENO]($SECONDS): tmp_name=\"$tmp_name\""
        cmd="$CLMGR_CMD -cSa HOSTNAME,STATE,CAA_STATE,VERSION,UNSYNCED_CHANGES query node ${_.name}"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        cmd_output=$($cmd 2>&1)
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.PHA_CMD_ERROR}
        fi
        # ksh93 runs the last pipeline stage in the current shell, so the
        # 'read' here does populate the Node_t fields.
        echo $cmd_output | IFS=: read _.host_name _.state _.caa_state _.version _.unsynced_changes

        DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 81 'INFO: The node: %1$s is in state: %2$s' "${_.name}" "${_.state}"

        if [[ ${_.name} == ${local_nodename} ]]
        then
            _.localization=${NODE_LOCALIZATION.LOCAL}
        else
            _.localization=${NODE_LOCALIZATION.REMOTE}
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.localization=\"${_.localization}\""

        # Get the AIX level from remote node named ._name
        cmd="/usr/bin/oslevel"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        _.cli_cmd cmd_output "$cmd"
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.FAILURE}
        fi
        _.aix_version=$cmd_output
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.aix_version=\"${_.aix_version}\""

        # Get the RSCT version from remote node named ._name
        cmd="/usr/sbin/rsct/install/bin/ctversion"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        _.cli_cmd cmd_output "$cmd"
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.FAILURE}
        fi
        _.rsct_version=$(echo $cmd_output |awk '{print $2}')
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.rsct_version=\"${_.rsct_version}\""

        # Get the CAA version from remote node named ._name
        cmd="/usr/bin/lslpp -L |grep bos.cluster.rte"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        _.cli_cmd cmd_output "$cmd"
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.FAILURE}
        fi
        _.caa_version=$(echo $cmd_output |awk '{print $2}')
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.caa_version=\"${_.caa_version}\""

        # Get the PowerHA version number from remote node named ._name
        cmd="$TOOLDIR/clodmget -q 'object = VERBOSE_LOGGING and name = ${_.name}' -f version -n HACMPnode"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        _.cli_cmd cmd_output "$cmd"
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.FAILURE}
        fi
        _.version_number=$cmd_output
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.version_number=\"${_.version_number}\""


        if [[ ${_.rg_list} == "" ]]
        then
            cmd="$CLMGR_CMD query resource_group"
            log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
            cmd_output=$($cmd 2>&1)
            rc=$?
            log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
            log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
            if (( $rc != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
                return ${RC.PHA_CMD_ERROR}
            fi
            # BUGFIX: store the query result. Previously the output was
            # discarded, leaving _.rg_list empty so the per-RG loop below
            # never ran when the caller had not pre-filled rg_list.
            _.rg_list=$cmd_output
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.rg_list=\"${_.rg_list}\""
        fi

        _.active_rgs=""

        for rg in ${_.rg_list}
        do
            cmd="$CLMGR_CMD -cSa CURRENT_NODE,STATE,TYPE,STARTUP,VOLUME_GROUP,NODES,SECONDARYNODES query resource_group ${rg}"
            log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
            cmd_output=$($cmd 2>&1)
            rc=$?
            log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
            log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
            if (( $rc != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
                return ${RC.PHA_CMD_ERROR}
            fi
            echo $cmd_output | IFS=: read current_node status type startup volume_group nodelist secondarynodes

            # Check if the RG is in error state then exit
            if [[ "$status" == "ERROR" ]]
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 82 'ERROR: The RG: %1$s is in state: %2$s' "${rg}" "${status}"
                DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 83 'INFO: Please correct the RG state and retry'
                return ${RC.FAILURE}
            fi

            nodelist="${nodelist} ${secondarynodes}"

            # Fill in active_rgs list in the Node_t for each node this RG is ONLINE on
            if [[ $status == "ONLINE" ]]
            then
                for node in $current_node
                do
                    if [[ $node == ${_.name} ]]
                    then
                        log_trace 5 "$0()[$LINENO]($SECONDS): adding $rg to ${_.name} active_rgs list"
                        _.active_rgs=${_.active_rgs:+${_.active_rgs} }${rg}
                    fi
                done
            fi

            # Fill in eligible_rgs list in Node_t for each node in nodelist and eligible_conc_vg list if
            # the RG is concurrent (start policy OAAN)
            for node in $nodelist
            do
                if [[ ${node} == ${_.name} ]]
                then
                    _.eligible_rgs=${_.eligible_rgs:+${_.eligible_rgs} }${rg}
                    if [[ $startup == "OAAN" ]]
                    then
                        _.eligible_conc_vg=${_.eligible_conc_vg:+${_.eligible_conc_vg} }${volume_group}
                    fi
                fi
            done
        done
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.active_rgs=\"${_.active_rgs}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.eligible_rgs=\"${_.eligible_rgs}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.eligible_conc_vg=\"${_.eligible_conc_vg}\""

        # Get the common open and eligible list of vg:
        # a VG is active-concurrent when it is both currently varied on
        # (lsvg -o) and in the eligible concurrent list, hence 'uniq -d'.
        cmd="/usr/sbin/lsvg -o"
        _.clrsh_cmd open_cvg "$cmd"
        if (( $? != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.FAILURE}
        fi
        open_cvg="${open_cvg} ${_.eligible_conc_vg}"
        _.active_conc_vg=$(echo "$open_cvg" | sed -e's/  */ /g' | tr ' ' '\n' | sort | uniq -d | tr '\n' ' ')
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.active_conc_vg=\"${_.active_conc_vg}\""

        # Check the NIM configuration only if lpp source is NIM and not local repo
        # Also check the NIM configuration in case it is a query for nim/cluster/all
        if [[ $source == ${UPDATE_SOURCE.NIM} || $query_scope == "NIM" || \
                    $query_scope == "CLUSTER" || $query_scope == "ALL" || $query_scope == "NODE" ]]
        then
            # Get the nim master from /etc/niminfo
            cmd="/usr/bin/grep -w '^export.*NIM_MASTER_HOSTNAME' /etc/niminfo"
            _.clrsh_cmd cmd_output "$cmd"
            rc=$?

            #------------------------------------------------------------------------
            # If the query scope is cluster then do not return with error message.
            # Instead just print a warning message that NIM client is not configured
            #-------------------------------------------------------------------------
            if [[ $rc != 0 && ( $source == ${UPDATE_SOURCE.NIM} || $query_scope == "NIM" || $query_scope == "ALL" ) ]]
            then
                DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
                DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 20 'ERROR: NIM Client is not configured properly on the node: %1$s. Configure the NIM Client and retry' "${_.name}"
                return ${RC.FAILURE}
            elif [[ $rc != 0 && ( $query_scope == "CLUSTER" || $query_scope == "NODE" ) ]]
            then
                DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 22 'WARNING: NIM Client is not configured properly on the node: %1$s.' "${_.name}"
            fi

            _.nim_master=$(echo $cmd_output |awk -F = ' { print $2 }')
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.nim_master=\"${_.nim_master}\""
        fi

        # In case rollback enabled or -X is provided then update the bootlist
        if [[ $rb == 1 || $rb == 2 || $exit_after_copy == 1 ]]
        then
            cmd="bootlist -m normal -o"
            _.clrsh_cmd cmd_output "$cmd"
            _.bootlist="$cmd_output"
            log_trace 1 "$0()[$LINENO]($SECONDS): The bootlist for node : ${_.name} is ${_.bootlist}"
        fi

        return ${RC.OK}
    } # End of "build()"

    #####################################################################
    #
    # NAME: refresh
    #
    # FUNCTION:
    #     Refresh Node_t data
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: refresh_scope = node_level | node_status | rg_vg_status
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.PHA_CMD_ERROR - PowerHA command error
    #     RC.FAILURE       - an other command failed
    #     RC.OK            - Success
    #
    # OUTPUT:
    #####################################################################
    function refresh {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        # $1 selects what to re-query:
        #   node_level   - PowerHA/AIX/RSCT/CAA versions only
        #   node_status  - node state, CAA state, unsynced changes only
        #   rg_vg_status - active RG and concurrent VG lists only
        #   anything else is treated as "all" (status + levels + RG/VG)
        typeset refresh_scope="$1"
        typeset cmd=""
        typeset -i rc=${RC.OK}
        typeset cmd_output=""
        typeset open_cvg=""
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t refresh"        
        # Build the clmgr attribute list for the chosen scope; the attribute
        # order here must match the IFS=: read field order below.
        if [[ $refresh_scope == "node_level" ]]
        then
            cmd="$CLMGR_CMD -cSa VERSION query node ${_.name}"
        elif [[ $refresh_scope == "node_status" ]]
        then
            cmd="$CLMGR_CMD -cSa STATE,CAA_STATE,UNSYNCED_CHANGES query node ${_.name}"
        elif [[ $refresh_scope != "rg_vg_status" ]]
        then
            refresh_scope="all"
            cmd="$CLMGR_CMD -cSa STATE,CAA_STATE,VERSION,UNSYNCED_CHANGES query node ${_.name}"
        fi
        
        if [[ $refresh_scope != "rg_vg_status" ]]
        then
            log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
            cmd_output=$($cmd 2>&1)
            rc=$?
            log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
            log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
            (( $rc != 0 )) && return ${RC.PHA_CMD_ERROR}
            
            # ksh93 runs the last pipeline stage in the current shell, so
            # these 'read's populate the Node_t fields directly.
            if [[ $refresh_scope == "node_level" ]]
            then
                echo $cmd_output | IFS=: read _.version
            elif [[ $refresh_scope == "node_status" ]]
            then
                echo $cmd_output | IFS=: read _.state _.caa_state _.unsynced_changes
            else
                echo $cmd_output | IFS=: read _.state _.caa_state _.version _.unsynced_changes
            fi
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.state=\"${_.state}\""
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.caa_state=\"${_.caa_state}\""
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.version=\"${_.version}\""
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.unsynced_changes=\"${_.unsynced_changes}\""
        fi
        
        if [[ $refresh_scope == "node_level" || $refresh_scope == "all" ]]
        then
            # Get the AIX level from remote node named ._name
            cmd="/usr/bin/oslevel"
            _.cli_cmd cmd_output "$cmd"
            (( $? != 0 )) && return ${RC.FAILURE}
            _.aix_version=$cmd_output
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.aix_version=\"${_.aix_version}\""
            
            # Get the RSCT version from remote node named ._name
            cmd="/usr/sbin/rsct/install/bin/ctversion"
            _.cli_cmd cmd_output "$cmd"
            (( $? != 0 )) && return ${RC.FAILURE}
            _.rsct_version=$(echo $cmd_output |awk '{print $2}')
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.rsct_version=\"${_.rsct_version}\""
            
            # Get the CAA version from remote node named ._name
            cmd="/usr/bin/lslpp -L |grep bos.cluster.rte"
            _.cli_cmd cmd_output "$cmd"
            (( $? != 0 )) && return ${RC.FAILURE}
            _.caa_version=$(echo $cmd_output |awk '{print $2}')
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.caa_version=\"${_.caa_version}\""
            
            # Get the PowerHA version number from remote node named ._name
            cmd="$TOOLDIR/clodmget -q 'object = VERBOSE_LOGGING and name = ${_.name}' -f version -n HACMPnode"
            _.cli_cmd cmd_output "$cmd"
            (( $? != 0 )) && return ${RC.FAILURE}
            _.version_number=$cmd_output
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.version_number=\"${_.version_number}\""
        fi
        
        if [[ $refresh_scope == "rg_vg_status" || $refresh_scope == "all" ]]
        then
            # Rebuild active RG / active concurrent VG lists from scratch,
            # scanning only the RGs this node is eligible for.
            _.active_rgs=""
            _.active_conc_vg=""
            
            for rg in ${_.eligible_rgs}
            do
                cmd="$CLMGR_CMD -cSa CURRENT_NODE,STATE,TYPE,STARTUP,VOLUME_GROUP query resource_group ${rg}"
                log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
                cmd_output=$($cmd 2>&1)
                rc=$?
                log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
                log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
                (( $rc != 0 )) && return ${RC.PHA_CMD_ERROR}
                echo $cmd_output | IFS=: read current_node status type startup volume_group
                
                # An RG counts as active on this node when it is ONLINE and
                # this node appears in its CURRENT_NODE list; OAAN startup
                # policy marks it (and its VGs) as concurrent.
                if [[ $status == "ONLINE" ]]
                then
                    for node in $current_node
                    do
                        if [[ $node == ${_.name} ]]
                        then
                            _.active_rgs=${_.active_rgs}${rg:+ $rg}
                            if [[ $startup == "OAAN" ]]
                            then
                                _.active_conc_vg=${_.active_conc_vg}${volume_group:+ $volume_group}
                            fi
                        fi
                    done
                fi
            done
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.active_rgs=\"${_.active_rgs}\""
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.active_conc_vg=\"${_.active_conc_vg}\""
            
            cmd="/usr/sbin/lsvg -o"
            _.clrsh_cmd open_cvg "$cmd"
            (( $? != 0 )) && return ${RC.FAILURE}
            
            open_cvg="${open_cvg} ${_.eligible_conc_vg}"
            
            # Filter VG that are open and eligible concurrent
            # ('uniq -d' keeps names present in both lists; this overwrites
            # the loop-built active_conc_vg with the final filtered value)
            _.active_conc_vg=$(echo "$open_cvg" | sed -e's/  */ /g' | tr ' ' '\n' | sort | uniq -d | tr '\n' ' ')
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.active_conc_vg=\"${_.active_conc_vg}\""
        fi
        return ${RC.OK}
    } # End of "refresh()"
    
    
    #####################################################################
    #
    # NAME: stop
    #
    # FUNCTION:
    #     Stop the powerHA node depending on the stop mode and the node state.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #     It checks if all active RGs can move on another active node in case a
    #     reboot is needed.
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: ezu_mode = "offline"|"unmanage"|"move"|"non_disruptive"|"silent"|"rolling"
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.RG_CANNOT_MOVE - At least one RG will not be able to move
    #     RC.FAILURE        - Failed to stop the node
    #     RC.OK             - Success
    #
    # OUTPUT:
    #####################################################################
    function stop {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        Node_t node
        typeset cmd=""
        typeset -i rc=${RC.OK}
        typeset output=""
        typeset ezu_mode="$1"
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t stop"
        # No explicit mode given: fall back to the node's configured manage mode
        [[ -z $ezu_mode ]]  && ezu_mode=${_.manage_mode}

        log_trace 5 "$0()[$LINENO]($SECONDS): ezu_mode is $ezu_mode"
        #-----------------------------------------------------
        : Set the stop mode depending on the EZU mode
        #-----------------------------------------------------
        # Active RGs present: ROLLING/MOVE (or a RSCT update that needs
        # PowerHA offline) requires every active RG to have a backup node,
        # and stops with MANAGE=move; otherwise stop with MANAGE=unmanage.
        if [[ -n ${_.active_rgs}  ]]
        then
	    if [[ $ezu_mode == ${MANAGE_MODE.ROLLING} || $ezu_mode == ${MANAGE_MODE.MOVE} || "${_.rsct_requires_powerha_offline}" == "yes" ]]
	    then
		#
                : Check all active rgs have an active node the rg is eligible to be moved to.
                #
                _.refresh "node_status"
                _.check_rgs_can_move
                rc=$?
                if (( $rc != ${RC.OK} ))
                then
                    (( $rc == ${RC.RG_CANNOT_MOVE} )) && \
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 50 'ERROR: At least one Resource Group on node: %1$s, has no active backup node.\n'\
                            "${_.name}"
                    return $rc
                fi
                _.stop_mode=${MANAGE_MODE.MOVE}
            elif [[ $ezu_mode == ${MANAGE_MODE.NON_DISRUPTIVE} || $ezu_mode == ${MANAGE_MODE.SILENT} \
               || $ezu_mode == ${MANAGE_MODE.UNMANAGE} || $ezu_mode == ${MANAGE_MODE.OFFLINE} ]]
            then
                _.stop_mode=${MANAGE_MODE.UNMANAGE}
            fi
        # No active RGs: bring an ONLINE node fully offline; an already
        # OFFLINE node needs no stop at all.
        elif [[  ${_.state} == "ONLINE" || ${_.state} == "NORMAL" ]]
        then
            _.stop_mode=${MANAGE_MODE.OFFLINE}
        elif [[  ${_.state} == "OFFLINE" ]]
        then
            _.stop_mode=${MANAGE_MODE.NONE}
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.stop_mode=\"${_.stop_mode}\""
        
        
        #-----------------------------------------------------
        : Stop the node if needed
        #-----------------------------------------------------
        if [[ ${_.stop_mode} != ${MANAGE_MODE.NONE} ]]
        then

	    echo ${_.aix_version} | IFS=. read VV RR MM FF
	    #
            : If AIX version is greater than 7.1 then stop the vg is silent mode
	    : cl_ezupdate works only for AIX 7 & higher so check only RR digit
            #
            if (( $RR > 1 ))
            then

                # Put each active concurrent VG to sleep (varyonvg -S)
                # before stopping cluster services; failures are logged
                # but do not abort (best effort).
                for vg in ${_.active_conc_vg}
                do
                    DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 79 'Setting active concurent VG: %1$s in sleep mode.\n'\
                        "$vg"
                    cmd="/usr/sbin/varyonvg -S $vg"
                    _.clrsh_cmd output "$cmd"
                    if (( $? != 0 ))
                    then
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 80 'ERROR: Failed to set active concurent VG: %1$s in sleep mode.\n'\
                            "$vg"
                        (( rc == ${RC.OK} )) && rc=${RC.FAILURE}
                        # best effort, continue
                    fi
                done
            fi


            #
            : Stopping PowerHA cluster services on node ${_.name} with MANAGE=${_.stop_mode}
            #
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 1 'Stopping PowerHA cluster services on node: %1$s in %2$s mode...\n'\
                "${_.name}" "${_.stop_mode}"
            cmd="$CLMGR_CMD stop node ${_.name} WHEN=now MANAGE=${_.stop_mode}"
            log_trace 0 "$0()[$LINENO]($SECONDS):execute cmd:  $cmd"
            $cmd 2>&1
            rc=$?
            log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"
            
            if (( $rc != 0 && $rc != 127 )) # The clmgr command returns 127 in case of warning
            then
                # no DSPMSG here as it's already done in CLMGR_CMD
                rc=${RC.FAILURE}
            else
                rc=${RC.OK}
            fi
            # Re-read node/CAA state so callers see the post-stop status
            _.refresh "node_status"
        fi

        return $rc
    } # End of "stop()"


    #####################################################################
    #
    # NAME: start
    #
    # FUNCTION:
    #     Start PowerHA depending on the start mode.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #     In ROLLING manage mode, it reboots the node. If not the local node it
    #     waits the node is joinable and powerHA cluster services are ready.
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: ezu_mode = "online"|"auto"|"manual"|"unmanage"|"offline"
    #                         |"move"|"non_disruptive"|"silent"|"rolling"
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.FAILURE    - error (pb with? reboot, start clstrmgrES, varyonvg)
    #     RC.PING_ERROR - timed out before pingable
    #     RC.SSH_ERROR  - timed out before clrshable
    #     RC.OK         - Success
    #
    # OUTPUT:
    #####################################################################
    function start {
        # Bring PowerHA cluster services back up on this node after an
        # EZUpdate operation, honoring the requested manage mode.
        #   $1 : ezu_mode -- defaults to ${_.manage_mode} when empty.
        # Returns RC.OK on success; RC.FAILURE on daemon/varyonvg errors;
        # RC.PING_ERROR / RC.SSH_ERROR propagated from reboot() in ROLLING mode.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        typeset cmd=""
        typeset -i rc=${RC.OK}
        typeset cmd_output=""
        typeset ezu_mode="$1"
        typeset start_mode=""           # mode handed to start_PowerHA
        typeset output=""
        typeset clstrmgr_state=""       # parsed state of the clstrmgrES daemon

        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t start"
        # Fall back to the node's configured manage mode when no mode given.
        [[ -z $ezu_mode ]] && ezu_mode=${_.manage_mode}

        log_trace 5 "$0()[$LINENO]($SECONDS): ezu_mode is $ezu_mode"
        if [[ $ezu_mode == ${MANAGE_MODE.ROLLING} ]]
        then
            #-----------------------------------------------------
            : Reboot the node ${_.name} and wait for restart
            #-----------------------------------------------------
            # reboot() waits until these SRC subsystems are active again.
            _.reboot "ctrmc|caa|clstrmgrES"
            rc=$?
            (( $rc != ${RC.OK} )) && return $rc
        fi

        #-----------------------------------------------------
        : Systematicaly restart clstrmgrES daemon
        #-----------------------------------------------------
        # tail -n +2 skips the lssrc header line; field 4 is assumed to be
        # the Status column (i.e. a PID column is present) -- TODO confirm
        # for an inoperative subsystem where lssrc omits the PID.
        cmd="/usr/bin/lssrc -s clstrmgrES | tail -n +2"
        _.clrsh_cmd output "$cmd"
        clstrmgr_state=$(echo $output | awk '{print $4}')

        if [[ "$clstrmgr_state" != "active" ]]
        then
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 5 'Starting cluster manager daemon: clstrmgrES...\n'
            output=""
            cmd="/usr/bin/startsrc -s clstrmgrES"
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 54 'ERROR: Failed to start cluster manager daemon clstrmgrES on node: %1$s\n'\
                    "${_.name}"
                rc=${RC.FAILURE}
            fi
        fi


        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.stop_mode=\"${_.stop_mode}\""

        # Nothing more to do when the node was never stopped.
        [[ ${_.stop_mode} == ${MANAGE_MODE.NONE} ]] && return ${RC.OK}

        # Map the recorded stop mode to the matching start mode for the
        # "restart" family of ezu modes; any other ezu_mode is used as-is.
        if [[ -z $ezu_mode || $ezu_mode == ${MANAGE_MODE.ONLINE} \
            || $ezu_mode == ${MANAGE_MODE.NON_DISRUPTIVE} \
            || $ezu_mode == ${MANAGE_MODE.SILENT} \
            || $ezu_mode == ${MANAGE_MODE.ROLLING} ]]
        then
            if [[ ${_.stop_mode} == ${MANAGE_MODE.UNMANAGE} ]]
            then
                start_mode=${MANAGE_MODE.AUTO}
            elif [[ ${_.stop_mode} == ${MANAGE_MODE.OFFLINE} ]]
            then
                start_mode=${MANAGE_MODE.MANUAL}
            elif [[ ${_.stop_mode} == ${MANAGE_MODE.MOVE} ]]
            then
                start_mode=${MANAGE_MODE.AUTO}
            fi
        else
            start_mode=$ezu_mode  # change ezu_mode here (should be powerHA mode)
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.start_mode=\"${start_mode}\""

        if [[ -n $start_mode ]]
        then
            _.start_PowerHA $start_mode
            rc=$?

            # ksh93 runs the last element of a pipeline in the current shell,
            # so VV/RR/MM/FF are available after the read.
            echo ${_.aix_version} | IFS=. read VV RR MM FF
            #
            : If AIX version is greater than 7.1 then start the vg in active mode
            : cl_ezupdate works only for AIX 7 & higher so check only RR digit
            #
            if (( $RR > 1 ))
            then
                #-----------------------------------------------------
                : Put active concurent VGs in normal concurent mode
                #-----------------------------------------------------
                for vg in ${_.active_conc_vg}
                do
                    DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 6 'Setting active concurent VG: %1$s in normal concurent mode.\n'\
                        "$vg"
                    cmd="/usr/sbin/varyonvg -a $vg"
                    _.clrsh_cmd output "$cmd"
                    if (( $? != 0 ))
                    then
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 60 'ERROR: Failed to set active concurent VG: %1$s in normal concurent mode.\n'\
                            "$vg"
                        (( rc == ${RC.OK} )) && rc=${RC.FAILURE}
                        # best effort, continue
                    fi
                done
             fi
        fi

        _.refresh "node_status"

        return $rc
    } # End of "start()"
    
    #####################################################################
    #
    # NAME: start_PowerHA
    #
    # FUNCTION:
    #   Start the cluster event manager on node depending of the start mode.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: mode = "auto"|"manual"
    #
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.FAILURE - error starting the node
    #     RC.OK      - Success
    #
    # OUTPUT:
    #####################################################################
    function start_PowerHA {
        # Start the PowerHA cluster services (cluster event manager) on this
        # node through clmgr.
        #   $1 : mode = "auto"|"manual", passed to clmgr as MANAGE=$mode
        # Returns RC.OK on success, RC.FAILURE when clmgr fails.
        # NOTE: clmgr output goes straight to stdout; errors are already
        # reported by CLMGR_CMD, so no DSP_MSG is issued here on failure.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        typeset cmd=""
        typeset -i rc=${RC.OK}
        typeset mode="$1"

        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t start_PowerHA"

        #-----------------------------------------------------
        : Start cluster event manager clevmgrdES on node ${_.name} with MANAGE=$mode
        #-----------------------------------------------------
        DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 7 'Starting PowerHA cluster services on node: %1$s in %2$s mode...\n'\
            "${_.name}" "$mode"
        cmd="$CLMGR_CMD start node ${_.name} WHEN=now MANAGE=$mode"
        log_trace 0 "$0()[$LINENO]($SECONDS): $cmd"
        $cmd 2>&1
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): rc=$rc"

        # rc 127 is deliberately treated as success -- presumably a benign
        # clmgr/rsh status in this context; TODO confirm intended semantics.
        if (( $rc != 0 && $rc != 127 )); then
            # no DSPMSG here as it's already done in CLMGR_CMD
            rc=${RC.FAILURE}
        else
            rc=${RC.OK}
        fi

        return $rc
    } # End of "start_PowerHA()"
 
  
    #####################################################################
    #
    # NAME: check_if_cluster_services_running
    #
    # FUNCTION:
    #   Check if the cluster services are running on the node.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #
    #     global:
    #
    #
    # RETURNS:
    #
    # OUTPUT:
    #####################################################################
    function check_if_cluster_services_running {
        # Query clmgr for this node's state and emit a warning when the
        # cluster services are not in the NORMAL state. Purely informational:
        # no explicit return code is produced.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        typeset query=""
        typeset query_output=""
        typeset node_state=""

        # -cv yields colon-separated "name:state"; tail drops the header line.
        query="$CLMGR_CMD -cv -a name,state q node ${_.name} | tail -n +2"

        query_output=$(eval $query 2>&1)
        # Second colon-delimited field is the state; strip any blanks.
        node_state=$(echo $query_output | awk -F : '{print $2}'| sed "s/ //g")
        if [[ "$node_state" != "NORMAL" ]]
        then
            log_trace 0 "$0()[$LINENO]($SECONDS): Cluster services are down on node : ${_.name} in the state:$node_state"
            DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 67 'WARNING: Cluster service are down on node %1$s in the state:%2$s.\n' "${_.name}" "$node_state"
        else
            log_trace 0 "$0()[$LINENO]($SECONDS): Cluster services are up and running on node : ${_.name}"
        fi
    } # End of "check_if_cluster_services_running()"
 
  
    #####################################################################
    #
    # NAME: check_and_verify_hdisk
    #
    # FUNCTION:
    #   Validate the hdisk given as an input to the Node.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #
    # RETURNS:
    #     RC.OK      - Success
    #     RC.FAILURE - Failure
    #	  RC.PHA_CMD_ERROR - PowerHA command error 	
    #
    # OUTPUT:
    #####################################################################
    function check_and_verify_hdisk {
        # Validate the hdisk list (_.hdisk) supplied for this node:
        #   - each disk must be readable, known to lspv and map to one PVID
        #   - each disk must not be shared with other cluster nodes
        #   - with -x (without_copy=1), each disk must carry a rootvg backup
        #     and all disks must share a single boot volume
        #   - the cumulated disk size must be at least the rootvg size
        # Uses globals: without_copy (presumably set from the -x command-line
        # flag -- TODO confirm); rc, cmd_output, count and disk are used
        # without being declared local.
        # Returns RC.OK, RC.FAILURE, or RC.PHA_CMD_ERROR.

        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t check_and_verify_hdisk"

        typeset rootvg_hdisk=""
        typeset -i rootvg_size=0
        typeset -i rootvg_block_size=0
        typeset -i rootvg_block_num=0
        typeset -i hdisk_size=0
        typeset shared_disks_pvids=""
        typeset hdisk_pvid=""
        typeset cmd=""
        typeset vg=""
        typeset -i total_hdisk_size=0

        typeset bootdisk_list=""
        typeset -A hdisk_bootdisk_pair   # disk name -> its boot volume

        #
        # Get list of disks shared in a cluster
        #
        cmd="/usr/es/sbin/cluster/cspoc/cllspvids"
        cmd_output=$($cmd 2>&1)
        rc=$?

        if (( $rc != ${RC.OK} ))
        then
            return ${RC.PHA_CMD_ERROR}
        fi

        # Keep only the text before the first '(' on each line -- assumed to
        # be the PVID part of the cllspvids output; TODO confirm format.
        shared_disks_pvids=$(echo "$cmd_output" |  awk -F '(' '{print $1}')
        log_trace 0 "$0()[$LINENO]($SECONDS): list of shared disks on node ${_.name} is $shared_disks_pvids"

        for disk in ${_.hdisk}
        do
            # Probe the first 512 blocks to make sure the disk is accessible.
            cmd="dd if=/dev/${disk} bs=1b count=512 of=/dev/null"
            _.clrsh_cmd cmd_output "$cmd"
            rc=$?

            if (( $rc != ${RC.OK} ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 91 'ERROR: %1$s on node:%2$s is not accessible. Try another disk' "${disk}" "${_.name}"
                return ${RC.FAILURE}
            fi

            # lspv line format: <name> <pvid> <vg> <state>
            cmd="lspv | grep -w $disk"
            _.clrsh_cmd cmd_output "$cmd"
            rc=$?

            if (( $rc != ${RC.OK} ))
            then
            	DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 68 'ERROR: Wrong disk: %1$s entered for node: %2$s.\n'\
                    "$disk" "${_.name}"
            	return ${RC.FAILURE}
            fi

            hdisk_pvid=$(echo $cmd_output | awk '{print $2}')
            log_trace 0 "$0()[$LINENO]($SECONDS): The PVID for hdisk:$disk on node:${_.name} is $hdisk_pvid"

            count=0
            # ksh93 runs the last pipeline element in the current shell, so
            # count updates and 'return' statements below take effect here.
            echo "$cmd_output" | while read hdisk_name _pvid hdisk_vg hdisk_state
            do

                if (( $without_copy == 1 ))
                then

                    # Checking if backup rootvg exists when -x is provided
                    if [[ $hdisk_vg == "None" ]]
                    then
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 75 'ERROR: Take backup of rootvg on %1$s for %2$s using -X option and try this operation' "$disk" "${_.name}"
                        return ${RC.FAILURE}
                    fi

                    # If the hdisk provided with -x is active
                    # then put it to sleep as the next command
                    # will not work properly in that case
                    if [[ $hdisk_state == "active" ]]
                    then
                        cmd="alt_rootvg_op -S -t $hdisk_name"
                        _.clrsh_cmd cmd_output "$cmd"
                        rc=$?

                        if (( $rc != ${RC.OK} ))
                        then
			    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 86 'ERROR: Unable to put to sleep %1$s for checking boot volume for hdisk list on node:%2$s' "$hdisk_name" "${_.name}"
                            return ${RC.FAILURE}
                        fi
                    fi

                    # Query the boot volume of the rootvg backup on this disk.
                    cmd="alt_rootvg_op -q -d $hdisk_name"
                    _.clrsh_cmd cmd_output "$cmd"
                    rc=$?

                    if (( $rc != ${RC.OK} ))
                    then
			DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 87 'ERROR: Unable to get boot volume for %1$s list on node:%2$s' "$hdisk_name" "${_.name}"
                        return ${RC.FAILURE}
                    fi

                    # store the boot volume name for each disk
                    hdisk_bootdisk_pair[$hdisk_name]=$cmd_output

                    # store the list of boot volumes
                    # in case it is already not there
                    if [[ $bootdisk_list != *"$cmd_output"* ]]
                    then
                        bootdisk_list="$bootdisk_list $cmd_output"
                    fi
                else
                    # Without -x the disk must be free of any volume group.
                    if [[ $hdisk_vg != None ]]
                    then
			DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 92 'ERROR: Disk %1$s for node %2$s already has a volume group assigned to it' "$disk" "${_.name}"
                        return ${RC.FAILURE}
             	    fi

                fi


                # Count matching lspv lines; any non-matching line means the
                # grep -w hit a different disk name.
            	if [[ $disk == $hdisk_name ]]
            	then
                    (( count++ ))
                else
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 68 'ERROR: Wrong disk: %1$s entered for node: %2$s.\n'\
                    	"$disk" "${_.name}"
                    return ${RC.FAILURE}
           	 fi
            done

	    #--------------------------------------------------------
	    # Check if the hdisk belongs to more than one PVID
	    # If yes then exit the script prompting the error message
	    # and asking user to re-enter another hdisk
	    #--------------------------------------------------------
            if (( $count > 1 ))
            then
           	DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 69 'ERROR: Hdisk : %1$s entered for node: %2$s belongs to more than 1 PVID'\
                    "$disk" "${_.name}"
            	DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 70 'Please enter another hdisk for node: %1$s' "${_.name}"
            	return ${RC.FAILURE}
            fi

            #---------------------------------------------------------
            # Check whether the hdisk provided for rollback is
            # shared with other nodes or not.
            # In case it is shared then simply exit with error message
            #---------------------------------------------------------
            if [[ "$shared_disks_pvids" == *"$hdisk_pvid"* ]]
            then
            	DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 73 'ERROR: The disk:%1$s on node:%2$s is shared with other nodes' "$disk" "${_.name}"
            	DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 74 'Enter non-shared hdisk for node: %1$s' "${_.name}"
            	return ${RC.FAILURE}
            fi
	done


	# In case -x is provided check there are proper boot volumes
	if (( $without_copy == 1 ))
        then

            # Count the distinct boot volumes collected above.
            count=0
            for bootdisk in $bootdisk_list
            do
                (( count++ ))
            done

	    # count greater than one means that the hdisks in the hdisk list
	    # for the node have more than one (different) boot volumes.
	    # A count of 1 means every disk in the hdisk list shares the
	    # same boot volume.
            if (( $count > 1 ))
            then
            	DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 88 'ERROR: The hdisk list:%1$s on node:%2$s has more than one boot volumes' "${_.hdisk}" "${_.name}"

		# Printing the summary here of the boot volumes
		# for each hdisk in hdisk list for that node
                for disk in ${_.hdisk}
                do
            	    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 89 'The hdisk:%1$s has boot volume on %2$s' "$disk" "${hdisk_bootdisk_pair[$disk]}"
                done
                return ${RC.FAILURE}

            else

                # Reorder _.hdisk so the (single) boot volume comes first;
                # $bootdisk still holds the last loop value from above.
                bootdisk=$(echo ${_.hdisk} | sed "s/$bootdisk//")
                _.hdisk="$bootdisk_list $bootdisk"

                log_trace 5 "$0()[$LINENO]($SECONDS): The update hdisk on node:${_.name} is ${_.hdisk}"
            fi
        fi

        cmd="lspv | grep -w rootvg"
        _.clrsh_cmd cmd_output "$cmd"
        rc=$?

   	if (( $rc != ${RC.OK} ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 71 'ERROR: Cannot find rootvg hdisk on node:%1$s\n' "${_.name}"
            return ${RC.FAILURE}
        fi

        rootvg_hdisk=$(echo $cmd_output |awk '{print $1}')

        #
        # Get rootvg PP size from lspv (5th line of the lspv detail output)
        #
        cmd="lspv $rootvg_hdisk | tail -n +5 | head -n 1"
        _.clrsh_cmd cmd_output "$cmd"
        rc=$?
        rootvg_block_num=$(echo $cmd_output | awk '{ print $3}')

	#
	# Get rootvg used PPs value from lspv (8th line of the detail output)
	#
        cmd="lspv $rootvg_hdisk | tail -n +8 | head -n 1"
        _.clrsh_cmd cmd_output "$cmd"
        rc=$?
        rootvg_block_size=$(echo $cmd_output | awk '{print $3}')

        rootvg_size=$(($rootvg_block_num*$rootvg_block_size))

        log_trace 0 "$0()[$LINENO]($SECONDS): rootvg size = $rootvg_size"

	#
        # Get hdisk size
        #
        for disk in ${_.hdisk}
        do
            cmd="getconf DISK_SIZE /dev/$disk"
            _.clrsh_cmd cmd_output "$cmd"
            rc=$?
            # With the -i attribute, "cmd_output" on the RHS is evaluated
            # arithmetically, i.e. this assigns the numeric value held in
            # $cmd_output (ksh93 integer-variable behavior).
            hdisk_size=cmd_output
            log_trace 0 "$0()[$LINENO]($SECONDS): hdisk: $disk size = $hdisk_size"
            total_hdisk_size+=hdisk_size
        done

        log_trace 0 "$0()[$LINENO]($SECONDS): Total hdisk size for node: ${_.name} $total_hdisk_size"

        #
        # The hdisk size should be greater than rootvg in order to continue
        #
        if (( $total_hdisk_size < $rootvg_size ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 72 'ERROR: Hdisk size is less than the rootvg hdisk size. Please try with another hdisk'
            return ${RC.FAILURE}
        fi

        return ${RC.OK}
    } # End of "check_and_verify_hdisk()"


    #####################################################################
    #
    # NAME: reboot
    #
    # FUNCTION:
    #   Reboots the node's LPAR
    #   if not the local node,
    #   - wait for the lpar to boot, to be pingable and then sshable
    #   - wait for SRC services passed in parameter to be active
    #   else
    #   - add an entry named ezu into /etc/inittab file
    #   - reboot is initiated
    #   - at restart, inittab runs the cl_ezupdate script with flag -B <start_mode>
    #   - upon error creating the inittab entry, the user can run the EZU script again after LPAR reboot
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #   Caller should manage the stop node first if needed.
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: services = "service1[|service2|...]"
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.FAILURE    - cannot reboot the node
    #     RC.PING_ERROR - timed out before pingable
    #     RC.SSH_ERROR  - timed out before clrshable
    #     RC.OK         - Success
    #
    # OUTPUT:
    #####################################################################
    function reboot {
        # Reboot this node's LPAR.
        # On the local node (when it was stopped): save EZUpdate logs to the
        # backup rootvg in the rollback case, then register an "ezu" inittab
        # entry so cl_ezupdate restarts cluster services after the reboot.
        # After issuing the reboot (remote node case): wait for the node to
        # become pingable, then clrsh-able, then for the SRC services listed
        # in $1 to be active.
        #   $1 : services = "service1[|service2|...]" (regex alternation)
        # Uses globals: rb, without_copy, old_rootvg_hdisk (rollback state
        # set elsewhere -- TODO confirm), CMD_PATH, CAA_CLRSH, CLMGR_CMD.
        # Returns RC.OK, RC.FAILURE, RC.PING_ERROR or RC.SSH_ERROR.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        typeset cmd=""
        typeset -i rc=${RC.FAILURE}
        typeset output=""           # to get command output
        typeset services="$1"       # list of services separated with a '|'
        typeset -i timeout=600      # wait 10 minutes max
        typeset -i remain=$timeout  # remaining time
        typeset -i period=10        # to check every 10 seconds
        typeset subsystem=""        # to parse lssrc command output
        typeset state=""            # to parse lssrc command output
        typeset start_mode=""       # mode to restart clstrmgrES
        typeset debug_option=""     # debug option for command in inittab

        log_trace 5 "$0()[$LINENO]($SECONDS): Entered reboot"

        [[ "$DEBUG_MODE" == "yes" ]] && debug_option="-D"

        if [[ ${_.localization} == ${NODE_LOCALIZATION.LOCAL}\
            && ${_.stop_mode} != ${MANAGE_MODE.NONE} ]]
        then

	    #
	    : In case of rollback on local node then
	    : copy the EZUpdate.log files from rootvg
	    : to backup rootvg i.e. altinst_rootvg
	    : just before the reboot
	    #
	    if (( $rb == 1 || $rb == 2 ))
            then

                log_trace 5 "$0()[$LINENO]($SECONDS): Start copying EZUpdate logs from rootvg to altinst_rootvg on node:${_.name}"

                log_trace 5 "$0()[$LINENO]($SECONDS): The altinst_rootvg on ${_.name} is on disk : ${_.hdisk}"

                #
                # Wake-up altinst_rootvg on local node (mounts it on /alt_inst)
                #
                cmd="alt_rootvg_op -W -d ${_.hdisk}"
                log_trace 0 "$0()[$LINENO]($SECONDS):execute cmd:  $cmd"
                cmd_output=$($cmd 2>&1)
                rc=$?
                log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"
                log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"

                if (( $rc == ${RC.FAILURE} ))
                then
                    DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 76 'WARNING: Could not wake up altinst_rootvg to copy EZUpdate logs on node: %1$s' "${_.name}"
                fi

                if (( $rc == ${RC.OK} ))
                then

                    # If -x is provided then create an inittab entry in backup rootvg if required
                    if (( $without_copy == 1 ))
                    then
                        #
                        # Check an entry rc.cluster to the /etc/inittab file in existing rootvg backup.
                        # In case it is already there then no need to create ezu entry in inittab
                        #
                        grep "rc.cluster" /alt_inst/etc/inittab > /dev/null 2>&1
                        rc=$?
                        log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"

                        if (( $rc != 0 ))
                        then

                            # Set the start_mode to auto to start the cluster services
                            # in that mode. In case the services are not running then
                            # the code will not reach here and so no inittab entry will
                            # be made in case cluster services are not running

                            start_mode=${MANAGE_MODE.AUTO}

                            # if the node was OFFLINE, no entry to add
                            if [[ -n $start_mode ]]
                            then

                                # Rebuild the backup rootvg inittab without any
                                # previous ezu entry, then append a fresh one.
                                grep -v "ezu" /alt_inst/etc/inittab > /tmp/inittab
                                rc=$?
                                log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"

                                str="ezu:2:once:/usr/es/sbin/cluster/utilities/cl_ezupdate -B $start_mode \
                                        >/var/hacmp/EZUpdate/EZUpdate-inittab.out \
                                        2>/var/hacmp/EZUpdate/EZUpdate-inittab.err"

                                echo $str >> /tmp/inittab
                                rc=$?
                                log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"

                                cmd="mv /tmp/inittab /alt_inst/etc/inittab"
                                log_trace 0 "$0()[$LINENO]($SECONDS):execute cmd:  $cmd"
                                cmd_output=$($cmd 2>&1)
                                rc=$?
                                log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"
                                log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
                           fi
                           start_mode=""
                       fi
                    fi

                    #
                    # Copy the EZUpdate.log file from rootvg to altinst_rootvg
                    #
                    cmd="cp /var/hacmp/EZUpdate/EZUpdate.log /alt_inst/var/hacmp/EZUpdate/EZUpdate.log"
                    log_trace 0 "$0()[$LINENO]($SECONDS):execute cmd:  $cmd"
                    cmd_output=$($cmd 2>&1)
                    rc=$?
                    log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"
                    log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"

                    if (( $rc == ${RC.FAILURE} ))
                    then
                        DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 77 'WARNING: Could not copy EZUpdate.log to back-up rootvg: altinst_rootvg'
                    fi

                    #
                    # Put to sleep old_rootvg
                    #
                    cmd="alt_rootvg_op -S -t $old_rootvg_hdisk"
                    log_trace 0 "$0()[$LINENO]($SECONDS):execute cmd:  $cmd"
                    cmd_output=$($cmd 2>&1)
                    rc=$?
                    log_trace 0 "$0()[$LINENO]($SECONDS):command returns code: $rc"
                    log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"

                    if (( $rc == ${RC.FAILURE} ))
                    then
                        DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 78 'WARNING: Could not put to sleep altinst_rootvg on node: %1$s' "${_.name}"
                    fi

                 fi
            fi

            #-----------------------------------------------------
            : On the local node, need to manage the cluster event
            : manager restart after reboot.
            #-----------------------------------------------------
            if [[ ${_.stop_mode} == ${MANAGE_MODE.MOVE} ]]
            then
                start_mode=${MANAGE_MODE.AUTO}
            else
                start_mode=${MANAGE_MODE.MANUAL}
            fi
            # if the node was OFFLINE, no entry to add
            if [[ -n $start_mode ]]
            then

                # Check if an entry of rc.cluster with name hacmp6000 is
                # already there then there is no need to create ezu
                # entry in inittab
                /usr/sbin/lsitab hacmp6000 > /dev/null 2>&1
                if (( $? != 0 ))
                then

                    #
                    : Add an entry "ezu" to the /etc/inittab file.
                    #
                    /usr/sbin/lsitab ezu > /dev/null 2>&1
                    if (( $? == 0 ))                # entry already exists
                    then
                    	: ezu entry already exists in /etc/inittab, replace it.
                    	/usr/sbin/rmitab ezu >/dev/null 2>&1
                    	if (( $? != 0 ))
                    	then
                            # BUGFIX: use the type self-reference _.name
                            # (a global "node" variable is not defined here).
                            DSP_MSG ${MSG_TYPE.ERR} $CL_EZUPDATE_MAIN_SET 10 'ERROR: Failed to remove %1$s entry from /etc/inittab file of node: %2$s.\n'\
                            	"ezu" "${_.name}"
                        fi
                    fi
                    /usr/sbin/mkitab "ezu:2:once:$CMD_PATH/cl_ezupdate -B ${start_mode} ${debug_option} \
                    	>/var/hacmp/EZUpdate/EZUpdate-inittab.out \
                    	2>/var/hacmp/EZUpdate/EZUpdate-inittab.err"
                    if (( $? != 0 ))
                    then
                    	# Continue anyway, display a warning, but the user can
                    	# run the script manually
                    	DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 90 '\
			    ERROR: Failed to add %1$s entry to /etc/inittab file of node: %2$s.\n\
    			    After the node has rebooted, you can restart the PowerHA cluster services\n\
    			    running: %3$s\n'\
                            "ezu" "${_.name}"\
                            "$CLMGR_CMD start node ${_.name} WHEN=now MANAGE=$start_mode"
                    fi
		fi
            fi
        fi

        #-----------------------------------------------------
        : Reboot the LPAR
        #-----------------------------------------------------
        DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 8 'Rebooting node %1$s...\n' "${_.name}"
        log_trace 0 "$0()[$LINENO]($SECONDS): Rebooting node ${_.name}."
        cmd="sync; sync; /usr/sbin/reboot"
        _.clrsh_cmd output "$cmd"

        # This part would not be executed on local node as it has just rebooted
        if (( $? != 0 && $? != 255 ))  # due to reboot, clrsh cannot get rc
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 9 'WARNING: Cannot reboot node %1$s.\n' "${_.name}"
            return ${RC.FAILURE}
        else
            : let some time to start the lpar reboot.
            sleep 10
        fi

        #
        : Wait for the node, to be pingable...
        #
        # ping -w$period itself provides the per-iteration wait on failure.
        for (( remain = $timeout; remain > 0; remain = remain - period ))
        do
            /etc/ping -c1 -w$period ${_.name} >/dev/null 2>&1
            (( $? == 0 )) && break
            print -- ".\c"        # progress indicator
        done
        if (( remain == 0 )); then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 10 '\nWARNING: timed out before node %1$s is reachable using %2$s.\n'\
                "${_.name}" "ping"
            log_trace 0 "$0()[$LINENO]($SECONDS): timed out before node ${_.name} is pingable."
            return ${RC.PING_ERROR}
        fi
        log_trace 0 "$0()[$LINENO]($SECONDS): can now ping node ${_.name}."

        #
        : Wait for the node, to be clrshable...
        #
        for (( remain = $timeout; remain > 0; remain = remain - period ))
        do
            # do not use clrsh_cmd as we don't want stderr to be displayed
            $CAA_CLRSH -n ${_.name} 'exit' >/dev/null 2>&1
            (( $? == 0 )) && break
            sleep $period
            print -- ".\c"        # progress indicator
        done
        if (( remain == 0 )); then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 10 '\nWARNING: timed out before node %1$s is reachable using %2$s.\n'\
                "${_.name}" "$CAA_CLRSH"
            log_trace 0 "$0()[$LINENO]($SECONDS): timed out before node ${_.name} is reachable with clrsh."
            return ${RC.SSH_ERROR}
        fi
        log_trace 0 "$0()[$LINENO]($SECONDS): can now reach node ${_.name} with clrsh."

        if [[ -z "$services" ]]
        then
            rc=${RC.OK}
        else
            #
            : Wait for SRC services "$services" to be active...
            #
            # List the subsystems NOT in the locale-dependent "active" state;
            # if any requested service is still listed, keep waiting.
            cmd="/usr/bin/lssrc -a | grep -v \$(/usr/bin/dspmsg src.cat -s 1 1 active)"
            for (( remain = $timeout; remain > 0; remain = remain - period ))
            do
                # get the list of not active Subsystem
                _.clrsh_cmd output "$cmd"
                if (( $? != 0 ))
                then
                    DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 11 '\nWARNING: Cannot get states of %1$s services on node %2$s.\n'\
                        "$services" "${_.name}"
                elif [[ "$output" =~ .*($services) ]]
                then
                    # one service still not active, wait
                    sleep $period
                    print -- ".\c"        # progress indicator
                else
                    # all services in parameter is active, break
                    rc=${RC.OK}
                    break
                fi
            done

            if (( remain == 0 )); then
                # BUGFIX: stray "DSPMSG" token was glued to the message-type
                # expansion, corrupting the DSP_MSG argument list.
                DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 12 '\nWARNING: timed out before services %1$s on node %2$s become active\n'\
                    "$services" "${_.name}"
                log_trace 0 "$0()[$LINENO]($SECONDS): timeout before services $services on node ${_.name} are active."
            else
                log_trace 0 "$0()[$LINENO]($SECONDS): services $services on node ${_.name} are active."
            fi
        fi
        log_trace 0 "$0()[$LINENO]($SECONDS): node ${_.name} reboot completed."

        return $rc
    } # End of "reboot()"


    #####################################################################
    #
    # NAME: clrsh_cmd
    #
    # FUNCTION:
    #   Execute a command on a local or remote node.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #   It uses /usr/es/sbin/cluster/utilities/cl_rsh for remote node.
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: OUTPUT to put the output of the command execution
    #           2: as many parameter as wanted to specify the command
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     the command return code
    #
    # OUTPUT:
    #     OUTPUT contains the output of the command
    #####################################################################
    function clrsh_cmd {
        # Execute a command on this node -- directly when local, wrapped in
        # $CLRSH when the node is remote -- and hand its combined
        # stdout/stderr back through the nameref given as first argument.
        #   $1   : name of the variable that receives the command output
        #   $2.. : the command (and its arguments) to execute
        # Returns the executed command's exit status.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        typeset -n OUTPUT=$1
        shift
        typeset -i status
        typeset result=""
        typeset full_cmd="$* 2>&1"

        [[ ${_.localization} == ${NODE_LOCALIZATION.REMOTE} ]] && full_cmd="$CLRSH -n ${_.name} '$full_cmd'"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $full_cmd"

	#-----------------------------------------------------------------------
        : In case the debugging is enabled the output from this method is
        : compared on both remote and local node to check that the content of
        : repository is same on both nodes. So while debugging is enabled
        : the ouput on local node is added with some extra information which is
        : not the case in remote node. And hence there is an output mismatch and
        : cl_ezupdate gives error. So we disable debugging while getting the
        : command output here.
        #------------------------------------------------------------------------
        if [[ ( "$VERBOSE_LOGGING" == "high" ) || ( "$DEBUG_MODE" == "yes" ) ]]
        then
            set +x
        fi

        result=$(eval $full_cmd)
        status=$?

        # Re-enable tracing now that the command output has been captured.
        if [[ ( "$VERBOSE_LOGGING" == "high" ) || ( "$DEBUG_MODE" == "yes" ) ]]
        then
            set -x
        fi

        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code:$status"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$result'"
        OUTPUT=$result
        return $status
    } # End of "clrsh_cmd()"
    
    #####################################################################
    #
    # NAME: cli_cmd
    #
    # FUNCTION:
    #   Execute a command on a local or remote node using cli_on_node.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #   It uses /usr/es/sbin/cluster/cspoc/cli_on_node
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: OUTPUT to put the output of the command execution
    #           2: the command to execute
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     the command return code
    #
    # OUTPUT:
    #     OUTPUT contains the output of the command
    #####################################################################
    function cli_cmd {
        # Run a command on this node via cli_on_node when the node is remote,
        # or directly (via eval) when it is local.  The command output --
        # stripped of the "<node>: " prefix that cli_on_node prepends -- is
        # stored in the caller's variable only when the command succeeds.
        # Returns the command's exit status.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset -n OUTPUT=$1            # nameref: caller's variable that receives the output
        typeset cmd="$2"
        typeset -i cmd_rc
        typeset cmd_output=""
        typeset tmp_co=""               # scratch copy (was an unintended global)
        typeset tmp_name=${_.name}    # avoid . in sed search string
        
        [[ ${_.localization} == ${NODE_LOCALIZATION.REMOTE} ]] && cmd="$CLI_ON_NODE ${_.name} ${cmd}"
        
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        
        # eval for embedded quotes on the local node command
        if [[ ${_.localization} == ${NODE_LOCALIZATION.REMOTE} ]]; then
            cmd_output=$($cmd 2>&1)
        else
            cmd_output=$(eval $cmd 2>&1)
        fi
        cmd_rc=$?
        
        # Remove the node name prefix from cli_on_node output.  The expansion
        # is quoted so newlines are preserved and sed strips the prefix from
        # every line; previously the unquoted expansion collapsed multi-line
        # output onto one line, unlike the local-node path.
        # NOTE(review): dots in $tmp_name are regex metacharacters in the sed
        # pattern; harmless for typical node names -- confirm.
        if [[ ${_.localization} == ${NODE_LOCALIZATION.REMOTE} ]]
        then
            tmp_co=$(echo "$cmd_output" | sed "s/^$tmp_name: //")
            cmd_output=$tmp_co
        fi
        
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code:$cmd_rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        
        # On failure the caller's variable is deliberately left untouched.
        (( $cmd_rc == 0 )) && OUTPUT=$cmd_output
        return $cmd_rc
    } # End of "cli_cmd()"
    
    
    #####################################################################
    #
    # NAME: check_nim_server
    #
    # FUNCTION:
    #   Check the nim server connection and get lpp sources
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: OUTPUT to put the list of NIM lpp source
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.NOT_FOUND  - no NIM server configured
    #     RC.PING_ERROR - cannot ping nim master
    #     RC.FAILURE    - nim master not accessible
    #     RC.OK         - Success
    #
    # OUTPUT:
    #     OUTPUT contains the list of NIM lpp source
    #####################################################################
    function check_nim_server {
        # Validate that this node can reach its configured NIM master and,
        # on success, hand back the list of available lpp_source resources
        # through the nameref parameter.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset -n OUTPUT=$1
        typeset nim_cmd=""
        typeset nim_output=""
        
        if [[ -z ${_.nim_master} ]]
        then
            #-----------------------------------------------------
            : NIM server not configured
            #-----------------------------------------------------
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 13 'ERROR: There is no NIM server configured on node: %1$s.\n'\
                "${_.name}"
            return ${RC.NOT_FOUND}
        fi
        
        # Test 1: basic IP reachability of the NIM master.
        nim_cmd="/etc/ping -c 1 -w 3 ${_.nim_master}"
        if ! _.clrsh_cmd nim_output "$nim_cmd"
        then
            #-----------------------------------------------------
            : Cannot ping NIM server
            #-----------------------------------------------------
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 14 'ERROR: Cannot ping the NIM server: %1$s from node: %2$s.\n'\
                "${_.nim_master}" "${_.name}"
            return ${RC.PING_ERROR}
        fi
        
        # Test 2: the nimclient protocol itself must work end to end.
        nim_cmd="/usr/sbin/nimclient -l |grep lpp_source"
        if ! _.clrsh_cmd nim_output "$nim_cmd"
        then
            #-----------------------------------------------------
            : nimclient cannot reach NIM server
            #-----------------------------------------------------
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 15 'ERROR: The configured NIM server %1$s is not accessible from node: %2$s.\n'\
                "${_.nim_master}" "${_.name}"
            return ${RC.FAILURE}
        fi
        
        OUTPUT="$nim_output"
        return ${RC.OK}
    } # End of "check_nim_server()"



    #####################################################################
    #
    # NAME: check_dir
    #
    # FUNCTION:
    #   Check if a directory exists and is not empty
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: lpp_source or directory
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.NOT_FOUND - directory not found
    #     RC.EMPTY_DIR - directory is empty
    #     RC.OK        - Success
    #
    # OUTPUT:
    #####################################################################
    function check_dir {
        # Verify that a directory exists on this node and is not empty.
        #   $1 - directory (or mounted lpp_source path) to check
        # Returns RC.NOT_FOUND if it does not exist, RC.EMPTY_DIR if it has
        # no entries, RC.OK otherwise.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset directory="$1"
        typeset output=""
        typeset cmd=""          # was implicitly global before this fix
        
        # Check if the directory exists
        cmd="/usr/bin/test -d $directory"
        _.clrsh_cmd output "$cmd"
        if (( $? != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 16 'WARNING: Directory %1$s does not exist on the node: %2$s\n'\
                "$directory" "${_.name}"
            return ${RC.NOT_FOUND}
        fi
        
        # Check for empty directory: ls -A prints nothing for an empty one
        cmd="/usr/bin/ls -A $directory"
        _.clrsh_cmd output "$cmd"
        if [[ $? != 0 || -z $output ]]
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 17 'WARNING: Directory %1$s is empty\n' "$directory"
            return ${RC.EMPTY_DIR}
        fi
        
        return ${RC.OK}
    } # End of "check_dir()"
    
    
    #####################################################################
    #
    # NAME: check_rgs_can_move
    #
    # FUNCTION:
    #   Check if all active RGs on the node can be moved to another node
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #   it is up to the caller to refresh the node information
    #   if an RG has no eligible node active, a message is displayed
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.RG_CANNOT_MOVE - at least one RG that cannot be moved
    #     RC.OK             - Success
    #
    # OUTPUT:
    #####################################################################
    function check_rgs_can_move {
        # Check that every RG currently active on this node has at least one
        # OTHER cluster node that is both eligible for that RG and in an
        # active (ONLINE/NORMAL) state.
        # Returns RC.PHA_CMD_ERROR when the node query fails,
        # RC.RG_CANNOT_MOVE when at least one RG has no active backup node,
        # RC.OK otherwise.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset rg=""
        typeset node_name=""
        typeset -i can_move=0
        typeset -i rg_cannot_move=0
        typeset -i rc=0                 # was implicitly global before this fix
        typeset cmd=""
        typeset cmd_output=""           # to get command output
        typeset node_list=""
        
        #
        : Get node list
        : Need a query here to get all the nodes in a cluster 
        #
        cmd="$CLMGR_CMD query nodes"
        log_trace 0 "$0()[$LINENO]($SECONDS): execute cmd: $cmd"
        cmd_output=$($cmd 2>&1)
        rc=$?
        log_trace 0 "$0()[$LINENO]($SECONDS): command returns code: $rc"
        log_trace 0 "$0()[$LINENO]($SECONDS): command output='$cmd_output'"
        if (( $rc != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
            return ${RC.PHA_CMD_ERROR}
        fi
        node_list=$cmd_output
        
        for rg in ${_.active_rgs}
        do
            can_move=0
            for node_name in $node_list
            do
                # never consider moving an RG onto this node itself
                [[ $node_name == ${_.name} ]] && continue
                
                eval "node=${cluster.nodes[$node_name]}"
                # NOTE(review): substring regex -- an RG name that is a
                # prefix of another (rg1 vs rg10) matches both; confirm RG
                # naming rules make this safe.
                if [[ "${node.eligible_rgs}" =~ .*($rg).* ]]
                then
                    if [[  ${node.state} == "ONLINE" \
                        || ${node.state} == "NORMAL" ]]
                    then
                        can_move=1
                        break
                    fi
                fi
            done
            if (( can_move == 0 ))
            then
                DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 51 'WARNING: Cannot move Resource Group: %1$s from node: %2$s, to another active backup node.\n'\
                    "${rg}" "${_.name}"
                rg_cannot_move+=1
            fi
        done
        
        (( rg_cannot_move != 0 )) && return ${RC.RG_CANNOT_MOVE}
        return ${RC.OK}
        
    } # End of "check_rgs_can_move()"


    #####################################################################
    #
    # NAME: get_items_from_source
    #
    # FUNCTION:
    #   Check the lpp from lpp_source or directory
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: lpp_source or directory
    #     global:
    #           Node_t
    #           item_list
    #           fileset_list
    #           ifix_list
    #
    # RETURNS: (int)
    #     RC.FAILURE - no available item from source
    #     RC.OK      - Success
    #
    # OUTPUT: list of items from source
    #####################################################################
    function get_items_from_source {
        # Collect the installable items (filesets and Ifixes) available from
        # the update source -- a local directory or a NIM lpp_source -- and
        # populate _.item_list, _.fileset_list and _.ifix_list.
        #   $1 - lpp_source name or local directory path
        # Relies on globals: source (UPDATE_SOURCE.*) and, for messages,
        # repository.
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset repos="$1"
        typeset output=""
        typeset cmd=""
        typeset -i rc=0
        typeset -i saverc=0             # was implicitly global before this fix
        typeset saveoutput=""           # was implicitly global before this fix
        typeset substring="the \"resource\" attribute is required for this"
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t get_items_from_source"
        if [[ $source == ${UPDATE_SOURCE.LOCAL} ]]
        then
            cmd="LANG=C /usr/sbin/geninstall -L -d $repos"
        else
            cmd="/usr/sbin/nimclient -o showres -a installp_flags=-L -a resource=$repos"
        fi
        _.clrsh_cmd output "$cmd"
        rc=$?
        
        # A bad NIM resource name produces a specific error text rather than
        # a distinctive return code, so match on the message.
        if [[ "$output" == *"$substring"* ]]
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 66 'ERROR: Wrong NIM resource. Please check and retry'
            return ${RC.FAILURE}
        fi    
        
        if (( $rc == 1 ))
        then
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                # The nimclient -o showres command above will fail if the
                # lpp_source resourse has only IFIXs and no .bff files
                # so we account for that here
                
                log_trace 1 "$0()[$LINENO]($SECONDS): Check for only IFIX files"
                saverc=$rc
                saveoutput=$output
                
                cmd="/usr/sbin/nimclient -o showres -a sm_inst_flags=\"list_filesets -L\" -a resource=$repos"
                _.clrsh_cmd output "$cmd"
                rc=$?
                
                if (( $rc == 0 ))
                then
                    # this was a case where the lpp_source resource has
                    # only IFIX files and no .bff files
                    
                    log_trace 1 "$0()[$LINENO]($SECONDS): nimclient failed because lpp_source had only IFIX files, no .bff files"
                    output=""
                    rc=0
                else
                    # the original nimclient command had an error
                    # other than that caused be a lpp_source with
                    # no .bff files
                    
                    rc=$saverc
                    output=$saveoutput
                fi
            fi
         fi
        
        if (( $rc == 0 ))
        then
            # workaround for defect 1004288 against cmdgeninst component
            typeset substr="No space left on device"
            if [[ "${output#*$substr}" != "$output" ]]
            then
                DSP_MSG ${MSG_TYPE.ERR} $CLUSTER_T_SET 4 'ERROR: command "%1$s" failed.\n' "$cmd"
                log_error "$substr"
                return ${RC.FAILURE}
            fi
            # end workaround
            _.item_list="$output"
            # field 5 of the colon-separated -L listing distinguishes
            # interim fixes ("E") from regular filesets
            _.fileset_list=$(echo "${output}"| awk -F : ' { if (  $5 != "E" ) { print $2 } }' )
            
            if [[ $source == ${UPDATE_SOURCE.LOCAL} ]]
            then
                _.ifix_list=$(echo "${output}"| awk -F : ' { if (  $5 == "E" ) { print $2 } }' )
            else
                # get ifix from nim server
                cmd="/usr/sbin/nimclient -o showres -a sm_inst_flags=\"list_filesets -L\" -a resource=$repos"
                _.clrsh_cmd output "$cmd"
                rc=$?
                if (( $rc == 0 ))
                then
                    # NOTE(review): field $1 is tested here where the local
                    # path tests $5 -- presumably the list_filesets output
                    # has a different layout; confirm against nimclient.
                    _.ifix_list=$(echo "${output}"| awk -F : ' { if (  $1 == "E" ) { print $2 } }' )
                    # NOTE(review): relies on ksh93 echo expanding \n to a
                    # newline between the two lists -- confirm on target OS.
                    _.item_list=$(echo "${_.item_list}\n${_.ifix_list}")
                else
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 18 'No available Ifix from source: %1$s on node: %2$s:\n'\
                        "${repos}" "${_.name}"
                    rc=${RC.FAILURE}
                fi
            fi
        else
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 19 'WARNING: No available item from source: %1$s on node: %2$s:\n'\
                "${repos}" "${_.name}"
            rc=${RC.FAILURE}
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.fileset_list=\"${_.fileset_list}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.ifix_list=\"${_.ifix_list}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.item_list=\"${_.item_list}\""
        return $rc
    } # End of "get_items_from_source()"
    
    
    #####################################################################
    #
    # NAME: build_items_to_install
    #
    # FUNCTION:
    #   Build the list of filesets that will be installed during the
    #   install process: this list depends on the already installed
    #   filesets and their versions.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: list of lpp to install
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.FAILURE - error with fileset or ifix to install
    #     RC.OK      - Success
    #
    # OUTPUT:
    #     _.lpp_to_update contains the list of lpp to update
    #####################################################################
    function build_items_to_install {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset output=""
        typeset cmd=""
        typeset -i rc=${RC.OK}
        
        typeset local_dir=""
        typeset ifix_dir=""
        typeset Fileset=""
        typeset vrmf=""
        typeset PTF_Id=""
        typeset State=""
        typeset Type=""
        typeset Description=""
        typeset EFIX_Locked=""
        
        typeset ifix=""
        typeset label=""
        typeset ifix_label_to_install=""
        typeset ifix_label_to_reinstall=""
        typeset package=""
        
        typeset lpps_locked=""
        typeset lpp_locked=(
            typeset -A name
            typeset -A label
        )
        
        typeset version=""
        typeset release=""
        typeset maintenance=""
        typeset fix=""
        typeset -h 'Installed version' i_version=""
        typeset -h 'Installed release' i_release=""
        typeset -h 'Installed maintenance release' i_maintenance=""
        typeset -h 'Installed fix' i_fix=""
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Enter Node_t build_items_to_installe"
        
        if [[ -n ${_.ifix_list} ]]
        then
            _.ifix_label=$(echo "${_.ifix_list}"| awk -F . ' {  print $1 }' | tr '\n' ' ')
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.ifix_label=\"${_.ifix_label}\""
            
            #------------------------------------------------------------
            : Build the list of ifix label to remove
            #------------------------------------------------------------
            output=""
            cmd="/usr/sbin/emgr -l -v2"
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 55 'ERROR: Failed to get Ifix list on node: %1$s\n'\
                    "${_.name}"
                return ${RC.FAILURE}
            fi
            _.ifix_label_to_remove=$(echo "$output" |awk '$1 == "LABEL:"  {print $2}' | tr '\n' ' ')
            _.ifix_label_to_remove="${_.ifix_label_to_remove} ${_.ifix_label}"
            # extract labels that are in the both list
            _.ifix_label_to_remove=$(echo "${_.ifix_label_to_remove}"\
                | tr ' ' '\n' | sort | uniq -d | tr '\n' ' ' | sed 's/\ *$//')
            log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.ifix_label_to_remove=\"${_.ifix_label_to_remove}\""
        fi
        
        #-----------------------------------------------------
        : Build the list of fileset to manage install and/or reject
        #-----------------------------------------------------
        # get installed filesets
        cmd="/usr/bin/lslpp -qLc all"
        _.clrsh_cmd output "$cmd"
        if (( $? != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 56 'ERROR: Failed to get fileset list on node: %1$s\n'\
                "${_.name}"
            return ${RC.FAILURE}
        fi
        
        echo "$output" | while IFS=":" read Package Fileset vrmf State PTF_Id Fix_State Type Description Destination_Dir Uninstaller\
                                       Message_Catalog Message_Set Message_Number Parent Automatic EFIX_Locked Install_Path Build_Date
        do
            # verify if fileset is installed and present on the repository.
            # else continue
            [[ ${rep_lpp_tab.name[${Fileset}]} != ${Fileset} ]] && continue
            
            # compare vrmf of installed fileset with vrmf of installable fileset from repository
            
            echo ${vrmf} | IFS="." read i_version i_release i_maintenance i_fix
            echo ${rep_lpp_tab.level[${Fileset}]} | IFS="." read version release maintenance fix
            
            
            if [[ $Fix_State == "A" ]]
            then
                #-----------------------------------------------------
                : Build the list of fileset to reject or commit
                #-----------------------------------------------------
                _.applied_list=${_.applied_list:+${_.applied_list} }${Fileset}
                [[ ${rep_lpp_tab.quiesce[$Fileset]} == "Y" && -z  ${_.reject_manage_mode} ]] && _.reject_manage_mode=${MANAGE_MODE.SILENT}
                [[ ( "$Fileset" == "cluster.es.server.rte" ||  "$Fileset" == "cluster.es.client.lib" ) && -z  ${_.reject_manage_mode} ]] && _.reject_manage_mode=${MANAGE_MODE.NON_DISRUPTIVE}
                if [[ "${EFIX_Locked}" != "0" ]]
                then
                    lpp_locked.name[$Fileset]=$Fileset
                    _.reject_locked_list=${_.reject_locked_list:+${_.reject_locked_list} }${Fileset}
                fi
                #------------------------------------------------------------
                : Set managment mode for reject operation
                #------------------------------------------------------------
                [[ ${rep_lpp_tab.quiesce[$Fileset]} == "B" || ${rep_lpp_tab.quiesce[$Fileset]} == "b" ]] && _.reject_manage_mode=${MANAGE_MODE.ROLLING}
                if [[ ${_.reject_manage_mode} != ${MANAGE_MODE.ROLLING} ]]
                then
                    [[ ${rep_lpp_tab.quiesce[$Fileset]} == "Y" ]] && _.reject_manage_mode=${MANAGE_MODE.SILENT}
                    if [[ ${_.reject_manage_mode} != ${MANAGE_MODE.SILENT} ]]
                    then
                        [[ "$Fileset" == "cluster.es.server.rte" ||  "$Fileset" == "cluster.es.client.lib" ]] && _.reject_manage_mode=${MANAGE_MODE.NON_DISRUPTIVE}
                    fi
                fi
            fi
            
            #-----------------------------------------------------------------
            # Check that the vrmf of installed fileset is less than the 
            # vrmf of the fileset to be installed. If not then skip the 
            # update 
            #-----------------------------------------------------------------  
            if [[ ${version} < ${i_version} ]]
            then
                #------------------------------------------------------------
                : Cannot install a new version $Fileset: ${rep_lpp_tab.level[$Fileset]}
                #------------------------------------------------------------
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 62 'The Fileset: %1$s is already on higher version %2$s on node: %3$s. Uninstall the fileset and try again for version : %4$s \n\n'\
            "${Fileset}" "${vrmf}" "${_.name}" "${rep_lpp_tab.level[$Fileset]}"    
                rc=${RC.FAILURE}
                continue
            elif (( ${i_version} == ${version} ))
            then
                if [[  ${release} < ${i_release} ]]
                then
                    #------------------------------------------------------------
                    : Cannot install a new release $Fileset: ${rep_lpp_tab.level[$Fileset]}
                    #------------------------------------------------------------
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 63 'The Fileset: %1$s is already on higher release %2$s on node: %3$s. Uninstall the fileset and try again for release: %4$s \n\n'\
            "${Fileset}" "${vrmf}" "${_.name}" "${rep_lpp_tab.level[$Fileset]}"
                    rc=${RC.FAILURE}
                    continue    
                elif ((  ${release} == ${i_release} ))
                then
                    
                    (( ${i_maintenance} > ${maintenance} )) && continue
                    
                    if [[  ${i_maintenance} < ${maintenance} ]]
                    then
                        if [[ "${Fileset}" == "bos.rte.install" ]]
                        then
                            #--------------------------------------------------------------------------------
                            : Error message to inform that AIX TL update is not supported using cl_ezupdate
                            #--------------------------------------------------------------------------------
                            
                            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 61 'cl_ezupdate does not support updating AIX technology levels' 
                            rc=${RC.FAILURE}
                            continue
                        fi
                    fi
                    
                    (( ${i_maintenance} == ${maintenance} && ${i_fix} >= ${fix} )) && continue
                fi
            fi
            
            # test EFIX_Locked
            if [[ "$EFIX_Locked" != "0" ]]
            then
                lpp_locked.name[$Fileset]=$Fileset
                _.install_locked_list=${_.install_locked_list:+${_.install_locked_list} }${Fileset}
            fi
            
            #------------------------------------------------------------
            : Add fileset to the list of lpp to update
            #------------------------------------------------------------
            _.lpp_to_update=${_.lpp_to_update:+${_.lpp_to_update} }${Fileset}
            
            #------------------------------------------------------------
            : Set the managment mode for apply operation
            #------------------------------------------------------------
            [[ ${rep_lpp_tab.quiesce[$Fileset]} == "B" || ${rep_lpp_tab.quiesce[$Fileset]} == "b" ]] && _.manage_mode=${MANAGE_MODE.ROLLING}
            [[ ${_.manage_mode} == ${MANAGE_MODE.ROLLING} ]] && continue
            [[ ${rep_lpp_tab.quiesce[$Fileset]} == "Y" ]] && _.manage_mode=${MANAGE_MODE.SILENT}
            [[ ${_.manage_mode} == ${MANAGE_MODE.SILENT} ]] && continue
            [[ ( "$Fileset" == "cluster.es.server.rte" ||  "$Fileset" == "cluster.es.client.lib" ) ]] && _.manage_mode=${MANAGE_MODE.NON_DISRUPTIVE}
        done
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.applied_list=\"${_.applied_list}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.reject_locked_list=\"${_.reject_locked_list}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.install_locked_list=\"${_.install_locked_list}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.manage_mode=\"${_.manage_mode}\""
        
        if (( $rc != ${RC.OK} ))
        then
            return $rc
        fi
        
        
        #------------------------------------------------------------
        : Build the list of locking ifix for install or reject action if Ifix are locking some fileset.
        #------------------------------------------------------------
        if [[ -n ${_.install_locked_list} || -n ${_.reject_locked_list} ]]
        then
            cmd="/usr/sbin/emgr -P| tail -n +4"
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 57 'ERROR: Failed to get package list on node: %1$s\n'\
                    "${_.name}"
                return ${RC.FAILURE}
            fi
            echo "$output" |while read package installer label
            do
                lpp_locked.name[$package]=$package
                lpp_locked.label[$package]=${lpp_locked.label[$package]:+${lpp_locked.label[$package]} }${label}
            done
        fi
        if [[ -n ${_.install_locked_list} ]]
        then
            for fs in ${_.install_locked_list}
            do
                _.ifix_locking_apply=${_.ifix_locking_apply:+${_.ifix_locking_apply} }${lpp_locked.label[$fs]}
            done
            _.ifix_locking_apply=$(echo ${_.ifix_locking_apply} | tr ' ' '\n' | uniq | tr '\n' ' ')
            # extract the ifix that are looking fs and that will be reinstalled after FS installation
            ifix_label_to_reinstall=$(echo "${_.ifix_locking_apply} ${_.ifix_label}" | tr ' ' '\n' |sort | uniq -d| tr '\n' ' ')
            
            # check if an Ifix require a reboot and set the node manage_mode accordingly
            for ifix in ${_.ifix_locking_apply}
            do
                cmd="/usr/sbin/emgr -l -v3 -L $ifix"
                _.clrsh_cmd output "$cmd"
                if (( $? != 0 ))
                then
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 23 'ERROR: Unable to get Ifix details: %1$s from: %2$s.\n'\
                        "$ifix" "$repository"
                    rc=${RC.FAILURE}
                    continue
                fi
                reboot=$(echo "$output" |awk '$1 == "REBOOT" && $2 == "REQUIRED:" {print $3}')
                if [[ $reboot == "yes" ]]
                then
                    _.manage_mode=${MANAGE_MODE.ROLLING}
                    break
                fi
            done
            if (( $rc != ${RC.OK} ))
            then
                return $rc
            fi
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.ifix_locking_apply=\"${_.ifix_locking_apply}\""
        
        
        #-----------------------------------------------------
        : Build the list of Ifixes to install
        : and set the manage_mode if a reboot is required
        #-----------------------------------------------------
        # extract labels that are in the ifix_label but not in ifix_label_to_remove
        # that are not already installed
        ifix_label_to_install=$(echo "${_.ifix_label_to_remove} ${_.ifix_label}" | tr ' ' '\n' | sort | uniq -u | tr '\n' ' ')
        # add the Ifixes that should be reinstalled
        ifix_label_to_install=$(echo "${ifix_label_to_install} ${ifix_label_to_reinstall}" | tr ' ' '\n' | sort | uniq | tr '\n' ' ')
        
        if [[ $source == ${UPDATE_SOURCE.LOCAL} ]]
        then
            ifix_dir="$repository"
        else
            # mount nim resource so we can extract information for Ifix
            _.mount_nim_res local_dir
            (( $? != ${RC.OK} )) && return ${RC.FAILURE}
            ifix_dir="${local_dir}/emgr/ppc"
        fi
        
        for ifix in ${_.ifix_list}
        do
            label=""
            # extract packages and boot flag for each Ifix
            cmd="/usr/sbin/emgr -d -v3 -e ${ifix_dir}/$ifix"
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 23 'ERROR: Unable to get Ifix details: %1$s from: %2$s.\n'\
                    "$ifix" "$repository"
                return ${RC.FAILURE}
            fi
            reboot=$(echo "$output" |awk '$1 == "REBOOT" && $2 == "REQUIRED:" {print $3}')
            packages=$(echo "$output" |awk '$1 == "PACKAGE:" {print $2}' | sort -u | uniq)
       
	    #
            : Check if RSCT ifix requires powerha to be offline
            # 
	    if [[ ${_.rsct_requires_powerha_offline} == "no" ]]
            then
                rsct_check_powerha_offline=$(echo "$output" | grep -w RSCT_REQUIRES_POWERHA_OFFLINE)
                if (( $? == 0 ))
                then
                    _.rsct_requires_powerha_offline="yes"
		    log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.rsct_requires_powerha_offline=\"${_.rsct_requires_powerha_offline}\""
                fi
            fi
 
	    #-------------------------------------------------------------------
	    : The IFIX stored in variable ifix_list might contain
	    : files which do not belong to any package/filesets.
	    : In that case the package variable will contain substring None
	    : In this case the command lslpp -qlc $packages will fail with rc 1
	    : and that ifix will not be added to the list of ifix to install
	    #--------------------------------------------------------------------	 
	    packages=$(echo $packages | sed 's/None//g')
 
            # check the package is installed
            cmd="/usr/bin/lslpp -qlc $packages"
            _.clrsh_cmd output "$cmd"
            # if this command failed, this is not an error, just do not install this Ifix
            if (( $? == 0 ))
            then
                label=$(echo $ifix |awk -F "." '{print $1}')
                label=$(echo "${label} ${ifix_label_to_install}" | tr ' ' '\n' | sort | uniq -d | tr '\n' '')
                if [[ -n $label ]]
                then
                    _.ifix_to_install=${_.ifix_to_install:+${_.ifix_to_install} }${ifix}
                    [[ $reboot == "yes" ]] && _.manage_mode=${MANAGE_MODE.ROLLING}
                fi
            fi
        done
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.ifix_to_install=\"${_.ifix_to_install}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.manage_mode=\"${_.manage_mode}\""
        
        if [[ $source == ${UPDATE_SOURCE.NIM} ]]
        then
            # umount nim resource
            _.umount_nim_res
            (( $? != ${RC.OK} )) && return ${RC.FAILURE}
        fi
        
        
        #------------------------------------
        : Manage ifix locking the reject
        #------------------------------------
        if [[ -n ${_.reject_locked_list} ]]
        then
            for fs in ${_.reject_locked_list}
            do
                _.ifix_locking_reject=${_.ifix_locking_reject:+${_.ifix_locking_reject} }${lpp_locked.label[$fs]}
            done
            _.ifix_label_to_remove=$(echo "${_.ifix_locking_reject} ${_.ifix_label_to_remove}" | tr ' ' '\n' | sort -u | tr '\n' ' ')
     fi               
        
        #-----------------------------------------------------
        : set the reject_manage_mode if a reboot is required
        #-----------------------------------------------------
        for ifix in ${_.ifix_label_to_remove}
        do
            cmd="/usr/sbin/emgr -l -v3 -L $ifix"
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 23 'ERROR: Unable to get Ifix details: %1$s from: %2$s.\n'\
                    "$ifix" "$repository"
                rc=${RC.FAILURE}
                continue
            fi
            reboot=$(echo "$output" |awk '$1 == "REBOOT" && $2 == "REQUIRED:" {print $3}')
            if [[ $reboot == "yes" ]]
            then
                _.reject_manage_mode=${MANAGE_MODE.ROLLING}
                break
             fi

	    #
            : Check if RSCT ifix requires powerha to be offline
            #
            if [[ ${_.rsct_requires_powerha_offline} == "no" ]]
            then
                rsct_check_powerha_offline=$(echo "$output" | grep -w RSCT_REQUIRES_POWERHA_OFFLINE)
                if (( $? == 0 ))
                then
                    _.rsct_requires_powerha_offline="yes"
		    log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.rsct_requires_powerha_offline=\"${_.rsct_requires_powerha_offline}\""
                fi
            fi
        done
        # No return error needed here as only logging after this point
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.ifix_locking_reject=\"${_.ifix_locking_reject}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.ifix_label_to_remove=\"${_.ifix_label_to_remove}\""
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.reject_manage_mode=\"${_.reject_manage_mode}\""
        
        return $rc
    } # End of "build_items_to_install()"
    
    
    #####################################################################
    #
    # NAME: mount_nim_res
    #
    # FUNCTION:
    #   Allocate and mount nim resource on local directory
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: dir to put the local directory used to mount the resource
    #     global:
    #           Node_t, repository
    #
    # RETURNS: (int)
    #     RC.FAILURE - error while mounting the NIM resource
    #     RC.OK      - Success
    #
    # OUTPUT:
    #     dir contains the local directory used to mount the resource
    #####################################################################
    function mount_nim_res {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset -n dir=$1
        typeset cmd=""
        typeset output=""
        typeset -i rc=${RC.OK}
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Enter Node_t mount_nim_res"
        
        # get nim resource location (cached in _.nim_res_loc after the first call)
        if [[ -z ${_.nim_res_loc} ]]
        then
            cmd="/usr/sbin/nimclient -l -l $repository"
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 20 'ERROR: Unable to get lpp_source: %1$s from server: %2$s.\n'\
                    "${repository}" "${_.nim_master}"
                return ${RC.FAILURE}
            fi
            _.nim_res_loc=$(echo "$output" | awk '$1 == "location" {print $3}')
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.nim_res_loc=\"${_.nim_res_loc}\""
        
        # allocate nim resource then mount remote repository
        # NOTE(review): on the failure paths below the allocated lpp_source is
        # not deallocated here — confirm callers always run umount_nim_res on error.
        cmd="/usr/sbin/nimclient -o allocate -a lpp_source=$repository"
        _.clrsh_cmd output "$cmd"
        if (( $? != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 21 'ERROR: Unable to allocate lpp_source: %1$s from server: %2$s.\n'\
                "${repository}" "${_.nim_master}"
            return ${RC.FAILURE}
        fi
        dir="/tmp/${repository}_${_.nim_master}"
        
        # create the local directory to mount the repository; if it cannot be
        # created the mount below cannot succeed, so fail fast instead of
        # attempting a doomed mount
        cmd="if [[ ! -d ${dir} ]]; then /usr/bin/mkdir ${dir}; else exit 0; fi"
        _.clrsh_cmd output "$cmd"
        if (( $? != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 52 'ERROR: Unable to create directory %1$s on node: %2$s.\n'\
                "${dir}" "${_.name}"
            return ${RC.FAILURE}
        fi
        log_trace 0 "$0()[$LINENO]($SECONDS): created directory ${dir} on node: ${_.name}"
        
        # mount repository on a local directory
        cmd="/usr/sbin/mount ${_.nim_master}:${_.nim_res_loc} ${dir}"
        _.clrsh_cmd output "$cmd"
        if (( $? != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 22 'ERROR: Unable to mount lpp_source: %1$s from server: %2$s.\n'\
                "${repository}" "${_.nim_master}"
            return ${RC.FAILURE}
        fi
        log_trace 0 "$0()[$LINENO]($SECONDS): mounted lpp_source: ${repository} from server: ${_.nim_master} on node: ${_.name}"
        
        return $rc
    } # End of "mount_nim_res()"
    
    
    #####################################################################
    #
    # NAME: umount_nim_res
    #
    # FUNCTION:
    #   Umount and then de-allocate NIM resource
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #           Node_t, repository
    #
    # RETURNS: (int)
    #     RC.FAILURE - error
    #     RC.OK      - Success
    #
    # OUTPUT:
    #####################################################################
    function umount_nim_res {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset -i rc=${RC.OK}
        typeset resp=""
        typeset cmd=""
        typeset mnt_dir="/tmp/${repository}_${_.nim_master}"
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Enter umount_nim_res"
        
        # Unmount only when the repository is actually mounted; an absent
        # mount exits 0 so the operation stays idempotent.
        cmd="if /usr/sbin/mount | /usr/bin/grep ${repository}_${_.nim_master} >/dev/null 2>&1; \
             then \
                /usr/sbin/unmount $mnt_dir; \
             else \
                exit 0; \
             fi"
        if _.clrsh_cmd resp "$cmd"
        then
            log_trace 0 "$0()[$LINENO]($SECONDS): unmounted lpp_source: ${repository} from server: ${_.nim_master} on node: ${_.name}"
        else
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 45 'ERROR: Unable to umount lpp_source: %1$s from server: %2$s.\n'\
                "${repository}" "${_.nim_master}"
            rc=${RC.FAILURE}
        fi
        
        # Release the NIM allocation even when the unmount failed (best effort).
        cmd="/usr/sbin/nimclient -o deallocate -a lpp_source=$repository"
        if _.clrsh_cmd resp "$cmd"
        then
            log_trace 0 "$0()[$LINENO]($SECONDS): de-allocated lpp_source: ${repository} from server: ${_.nim_master} on node: ${_.name}"
        else
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 46 'ERROR: Unable to deallocate lpp_source: %1$s from server: %2$s.\n'\
                "${repository}" "${_.nim_master}"
            rc=${RC.FAILURE}
        fi
        
        return $rc
    } # End of "umount_nim_res()"
    
   
    #####################################################################
    #
    # NAME: rollback_reboot
    #
    # FUNCTION:
    #   Perform the reboot and start cluster service after rollback on the node
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #
    # RETURNS: (int)
    #     RC.FAILURE - error
    #     RC.OK      - Success
    #
    # OUTPUT: reboots the node after rollback
    #####################################################################
    function rollback_reboot {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset cmd=""
        typeset -i rc=${RC.OK}
        typeset ezu_mode="$1"
        typeset start_mode=""
        typeset output=""
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t rollback_reboot"
        # default to the node's manage mode when no mode was passed in
        [[ -z $ezu_mode ]] && ezu_mode=${_.manage_mode}
        
        log_trace 1 "$0()[$LINENO]($SECONDS): Node_t rollback_reboot ${_.name}.stop_mode=\"${_.stop_mode}\""
        log_trace 5 "$0()[$LINENO]($SECONDS): ezu_mode is $ezu_mode"
        
        #-----------------------------------------------------
        : Reboot the node ${_.name} and wait for restart
        #-----------------------------------------------------
        _.reboot "ctrmc|caa|clstrmgrES"
        rc=$?
        (( $rc != ${RC.OK} )) && return $rc
        
        #-----------------------------------------------------
        : Systematically restart clstrmgrES daemon
        #-----------------------------------------------------
        DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 5 'Starting cluster manager daemon: clstrmgrES...\n'
        output=""
        cmd="/usr/bin/startsrc -s clstrmgrES"
        _.clrsh_cmd output "$cmd"
        if (( $? != 0 ))
        then
            DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 54 'ERROR: Failed to start cluster manager daemon clstrmgrES on node: %1$s\n'\
                "${_.name}"
            rc=${RC.FAILURE}
        fi
        
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.stop_mode=\"${_.stop_mode}\""
        
        # NOTE(review): this returns RC.OK even when the clstrmgrES restart
        # above failed (rc=FAILURE is discarded) — confirm that is intentional
        # for nodes whose cluster services were never stopped.
        [[ ${_.stop_mode} == ${MANAGE_MODE.NONE} ]] && return ${RC.OK}
        
        # Map the mode used to stop cluster services to the matching start mode:
        #   UNMANAGE -> AUTO, OFFLINE -> MANUAL, MOVE -> AUTO.
        # Any ezu_mode outside the known EZUpdate modes is passed through as-is.
        if [[ -z $ezu_mode || $ezu_mode == ${MANAGE_MODE.ONLINE} \
            || $ezu_mode == ${MANAGE_MODE.NON_DISRUPTIVE} \
            || $ezu_mode == ${MANAGE_MODE.SILENT} \
            || $ezu_mode == ${MANAGE_MODE.ROLLING} ]]
        then
            if [[ ${_.stop_mode} == ${MANAGE_MODE.UNMANAGE} ]]
            then
                start_mode=${MANAGE_MODE.AUTO}
            elif [[ ${_.stop_mode} == ${MANAGE_MODE.OFFLINE} ]]
            then
                start_mode=${MANAGE_MODE.MANUAL}
            elif [[ ${_.stop_mode} == ${MANAGE_MODE.MOVE} ]]
            then
                start_mode=${MANAGE_MODE.AUTO}
            fi
        else
            start_mode=$ezu_mode  # change ezu_mode here (should be powerHA mode)
        fi
        log_trace 1 "$0()[$LINENO]($SECONDS): ${_.name}.start_mode=\"${start_mode}\""
        
        # restart PowerHA cluster services when a start mode was determined
        if [[ -n $start_mode ]]
        then
            _.start_PowerHA $start_mode
            rc=$?
        fi
        
        _.refresh "node_status"
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Exiting Node_t rollback_reboot with rc:$rc"
        return $rc
    } # End of "rollback_reboot()"

 
    #####################################################################
    #
    # NAME: rollback
    #
    # FUNCTION:
    #   Perform the Rollback on the node 
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #
    # RETURNS: (int)
    #     RC.FAILURE - error
    #     RC.OK      - Success
    #
    # OUTPUT: Rollbacks the node 
    #####################################################################
    function rollback {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset BOOTLIST=""
        typeset cmd=""
        typeset cmd_output=""
        typeset manage_mode=$1
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Entered Node_t rollback"
        
        log_trace 1 "$0()[$LINENO]($SECONDS): Node_t rollback ${_.name}.stop_mode=\"${_.stop_mode}\""
        # Boot first from _.hdisk (presumably the rollback/alternate disk copy
        # — TODO confirm), then fall back to the node's previous bootlist.
        BOOTLIST="${_.hdisk} ${_.bootlist}"
        
        log_trace 1 "$0()[$LINENO]($SECONDS): Node_t rollback bootlist for rollback is $BOOTLIST"
        
        # absolute path for consistency with every other remote command here
        cmd="/usr/sbin/bootlist -m normal $BOOTLIST"
        _.clrsh_cmd cmd_output "$cmd"
        (( $? != 0 )) && return ${RC.FAILURE}
        
        # reboot onto the rollback image and restart cluster services
        _.rollback_reboot ${manage_mode}
        (( $? != 0 )) && return ${RC.FAILURE}
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Exiting Node_t rollback"
        return ${RC.OK}
        
    } # End of "rollback()"
 

    #####################################################################
    #
    # NAME: install_preview
    #
    # FUNCTION:
    #   Install in preview mode lpp and ifix from repository
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: source repository (optional)
    #     global:
    #           Node_t
    #           repository
    #
    # RETURNS: (int)
    #     RC.FAILURE - error
    #     RC.OK      - Success
    #
    # OUTPUT: list of lpp from source
    #####################################################################
    function install_preview {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset repos="$1"
        typeset output=""
        typeset -h "command with parameter to install lpp_source"    cmd=""
        typeset -i rc=0
        typeset -i ifix_rc=0
        typeset ifix=""
        typeset file=""
        typeset local_dir=""
        typeset ifix_dir=""
        typeset -h "install flags command"  installp_flags="apgqXw"
        typeset -h "installp bundle for nimclient"   bnd_file=""    
        
        ####################################################################################
        # installp/nimclient flags used here:
        #   -a  apply software products or updates (default action)
        #   -p  preview only: run all preinstallation checks, install nothing
        #   -g  automatically pull in requisites of the specified software
        #   -q  quiet mode, suppress prompting
        #   -X  attempt to expand any file systems short on space
        #   -Y  agree to required software license agreements
        #   -w  do not wildcard fileset names
        ####################################################################################
        
        log_trace 5 "$0()[$LINENO]($SECONDS): Enter Node_t install_preview"
        
        # default to the global repository when no repository parameter given
        [[ -z $repos ]] && repos=$repository
        
        #-----------------------------------------------------
        : Preview fileset updates on node ${_.name}
        #-----------------------------------------------------
        if [[ -n ${_.lpp_to_update} ]]
        then
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 27 'Installing fileset updates in preview mode on node: %1$s...\n'\
                "${_.name}"
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                # build an installp bundle file listing the filesets to install
                bnd_file="/tmp/${repository}_${_.nim_master}.bnd"
                cmd="touch $bnd_file"
                _.clrsh_cmd output "$cmd"
                for file in ${_.lpp_to_update}
                do
                    # escaped quotes: the quotes must survive into the remote command
                    cmd="echo \"I:$file\" >> $bnd_file"
                    _.clrsh_cmd output "$cmd"
                    if (( $? != 0 ))
                    then
                        rc=${RC.FAILURE}
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 64 'ERROR: Failed to create bundle file on node: %1$s.\n'\
                            "${_.name}"
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                    fi
                done
                cmd="/usr/sbin/nimclient -o cust -a installp_flags=$installp_flags -a accept_licenses=yes -a lpp_source=${repos} -a filesets=\"-f${bnd_file}\""
            else
                # use the effective repos parameter (was hard-coded to the
                # global $repository, ignoring an explicit $1)
                cmd="/usr/sbin/installp -$installp_flags -Y -d $repos ${_.lpp_to_update}"
                ifix_dir="$repos"
            fi
            _.clrsh_cmd output "$cmd"
            if (( $? == 0 ))
            then
                DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 28 'Succeeded to install preview updates on node: %1$s.\n'\
                    "${_.name}"
            else
                rc=${RC.FAILURE}
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 29 'WARNING: Failed to install preview updates on node: %1$s.\n'\
                    "${_.name}"
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
            fi
            
            # clean up the temporary bundle file
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                cmd="rm $bnd_file"
                _.clrsh_cmd output "$cmd"
                if (( $? != 0 ))
                then
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 65 'WARNING: Failed to delete file %1$s on node: %2$s.\n'\
                            "$bnd_file" "${_.name}"
                fi
            fi    
        fi
        
        #-----------------------------------------------------
        : Preview Ifix installation on node ${_.name}
        #-----------------------------------------------------
        if [[ -n ${_.ifix_to_install} ]]
        then
            
            #------------------------------------
            # Set the variable ifix_dir with 
            # the path to where ifix is stored
            #------------------------------------
            ifix_dir="$repos"
            
            # mount nim resource
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                _.mount_nim_res local_dir
                (( $? != ${RC.OK} )) && return ${RC.FAILURE}
                ifix_dir="${local_dir}/emgr/ppc"
            fi
            
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 31 'Installing Ifixes in preview mode on node: %1$s...\n'\
                "${_.name}"
            # track the ifix result separately so a failure from the fileset
            # section above is not silently discarded
            ifix_rc=${RC.OK}
            for ifix in ${_.ifix_to_install}
            do
                cmd="/usr/sbin/emgr -e ${ifix_dir}/$ifix -p"
                _.clrsh_cmd output "$cmd"
                (( $? != 0 )) && ifix_rc=${RC.FAILURE} # best effort, continue
            done
            if (( $ifix_rc == ${RC.OK} ))
            then
                DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 32 'Succeeded to install preview Ifixes on node: %1$s.\n'\
                    "${_.name}"
            else
                DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 33 'WARNING: Failed to install preview Ifixes on node: %1$s.\n'\
                    "${_.name}"
                DSP_MSG ${MSG_TYPE.WARN} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                rc=${RC.FAILURE}
            fi
            
            # umount nim resource
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                _.umount_nim_res
                (( $? != ${RC.OK} && rc == ${RC.OK} )) && rc=${RC.FAILURE}
            fi
        fi
        
        return $rc
    } # End of "install_preview()"
    
    

    #####################################################################
    #
    # NAME: install_apply
    #
    # FUNCTION:
    #   Install lpp from repository
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: source repository (optional)
    #     global:
    #           Node_t
    #           repository (if not specified)
    #
    # RETURNS: (int)
    #     RC.FAILURE - error while installing fileset or ifix
    #     RC.OK      - Success
    #
    # OUTPUT:
    #####################################################################
    function install_apply {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        
        typeset repos="$1"
        typeset output=""
        typeset -i rc=${RC.OK}
        typeset local_dir=""
        typeset ifix_dir=""
        typeset ifix=""
        typeset file=""
        typeset -h "command with parameter to install lpp_source"    cmd=""
        typeset -h "install flags command"  installp_flags="agXw"
        typeset -h "installp bundle for nimclient"   bnd_file=""

        log_trace 5 "$0()[$LINENO]($SECONDS): Enter Node_t install_apply"
        
        ####################################################################################
        # installp/nimclient flags used here:
        #   -a  apply software products or updates (default action)
        #   -g  automatically pull in requisites of the specified software
        #   -X  attempt to expand any file systems short on space
        #   -Y  agree to required software license agreements
        #   -w  do not wildcard fileset names
        ####################################################################################
        
        #-----------------------------------------------------
        : First remove locking Ifix on ${_.name}
        #-----------------------------------------------------
        if [[ -n ${_.ifix_locking_apply} ]]
        then
            for ifix in ${_.ifix_locking_apply}
            do
                DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 34 'Removing Ifix: %1$s on node: %2$s ...\n'\
                    "${ifix}" "${_.name}"
                cmd="/usr/sbin/emgr -r -L $ifix"
                _.clrsh_cmd output "$cmd"
                if (( $? != 0 ))
                then
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 58 'ERROR: Failed to remove Ifixes on node: %1$s\n'\
                        "${_.name}"
                    rc=${RC.FAILURE}
                fi
            done
        fi
        (( $rc != ${RC.OK} )) && return $rc
        
        #-----------------------------------------------------
        : Apply updates on node ${_.name}
        #-----------------------------------------------------
        # default to the global repository when no repository parameter given
        [[ -z $repos ]] && repos=$repository
        if [[ -n ${_.lpp_to_update} ]]
        then
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                # build an installp bundle file listing the filesets to install
                bnd_file="/tmp/${repository}_${_.nim_master}.bnd"
                cmd="touch $bnd_file"
                _.clrsh_cmd output "$cmd"
                for file in ${_.lpp_to_update}
                do
                    # escaped quotes: the quotes must survive into the remote command
                    cmd="echo \"I:$file\" >> $bnd_file"
                    _.clrsh_cmd output "$cmd"
                    if (( $? != 0 ))
                    then
                        rc=${RC.FAILURE}
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 64 'ERROR: Failed to create bundle file on node: %1$s.\n'\
                            "${_.name}"
                        DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                    fi
                done
                cmd="/usr/sbin/nimclient -o cust -a installp_flags=$installp_flags -a accept_licenses=yes -a lpp_source=${repos} -a filesets=\"-f${bnd_file}\""
            else
                cmd="/usr/sbin/geninstall -Y -I $installp_flags -d $repos ${_.lpp_to_update}"
                ifix_dir="$repos"
            fi
            
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 35 'Applying updates on node: %1$s...\n' "${_.name}"
            
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 37 'ERROR: Failed to apply updates on node: %1$s.\n'\
                    "${_.name}"
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                #
                # Once locking Ifixes are removed (here before), we cannot rollback
                # this step. So just leave in error.
                #
                return ${RC.FAILURE}
            fi
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 36 'Succeeded to apply updates on node: %1$s.\n'\
                "${_.name}"
            
            # clean up the temporary bundle file
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                cmd="rm $bnd_file"
                _.clrsh_cmd output "$cmd"
                if (( $? != 0 ))
                then
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 65 'WARNING: Failed to delete file %1$s on node: %2$s.\n'\
                            "$bnd_file" "${_.name}"
                fi
            fi    
        fi
        
        #-----------------------------------------------------
        : Install Ifixes on node ${_.name}
        #-----------------------------------------------------
        if [[ -n ${_.ifix_to_install} ]]
        then
            
            #-------------------------------------------------
            # update the variable ifix_dir with the
            # path to where the ifix is stored
            #------------------------------------------------
            ifix_dir="$repos"     
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                _.mount_nim_res local_dir
                (( $? != ${RC.OK} )) && return ${RC.FAILURE}
                ifix_dir="${local_dir}/emgr/ppc"
            fi
            
            rc=${RC.OK}
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 59 'Installing Ifixes on node: %1$s...\n'\
                "${_.name}"
            for ifix in ${_.ifix_to_install}
            do
                DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 42 'Installing Ifix: %1$s on node: %2$s ...\n' "${ifix}" "${_.name}"
                cmd="/usr/sbin/emgr -e ${ifix_dir}/$ifix"
                _.clrsh_cmd output "$cmd"
                if (( $? != 0 ))
                then
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 44 'ERROR: Failed to install Ifix: %1$s on node: %2$s.\n'\
                        "${ifix}" "${_.name}"
                    DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                    (( $rc == ${RC.OK} )) && rc=${RC.FAILURE}
                    break
                else
                    DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 43 'Succeeded to install Ifix: %1$s on node: %2$s.\n'\
                        "${ifix}" "${_.name}"
                fi
            done
            # release the NIM resource; preserve any earlier failure status
            if [[ $source == ${UPDATE_SOURCE.NIM} ]]
            then
                _.umount_nim_res
                (( $? != 0 && $rc == ${RC.OK} )) && rc=${RC.FAILURE}
            fi
        fi
        return $rc
        
    } # End of "install_apply()"
    
    
    #####################################################################
    #
    # NAME: reject
    #
    # FUNCTION:
    #   Reject installed and applied lpp on the node and that are present on the
    #   repository.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: source repository (optional)
    #     global:
    #           Node_t
    #           repository (if not specified)
    #
    # RETURNS: (int)
    #     RC.FAILURE - error while rejecting fileset or ifix
    #     RC.OK      - Success
    #
    # OUTPUT:
    #####################################################################
    function reject {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        # $1 : source repository (optional; global repository used otherwise)
        typeset repos="$1"
        typeset output=""
        typeset -h "command with parameter to install lpp_source"    cmd=""
        typeset -i rc=${RC.OK}
        typeset ifix=""

        log_trace 5 "$0()[$LINENO]($SECONDS): Enter Node_t reject"

        ####################################################################################
        # installp flags used below:
        # -r    Rejects all specified software updates that are currently applied but not committed. When a software update is rejected any other software product that is
        #       dependent on it (that is, those software products that have the specified software product as a requisite) must also be rejected. The -g flag can be used to
        #       reject automatically dependent software updates. The keyword all is not valid with the reject flag (-r).
        # -g    When used to remove or reject software, this flag automatically removes or rejects dependents of the specified software.
        #       The -g flag is not valid when used with the -F flag.
        # -X    Attempts to expand any file systems where there is insufficient space to do the installation.
        #       This option expands file systems based on current available space and size estimates that are provided by the software product package.
        #       Note that it is possible to exhaust available disk space during an installation even if the -X flag is specified,
        #       especially if other files are being created or expanded in the same file systems during an installation.
        #       Also note that any remote file systems cannot be expanded.
        # -J    (SMIT internal) used together with -w.
        # -w    Does not wildcard FilesetName. Use this flag from smit so it only installs the fileset chosen and not the filesets that match. For example, if you choose
        #       foo.rte, foo.rte.bar is not automatically pulled in, as it would be by default, without the -w flag.
        ####################################################################################

        # First remove the locking ifixes and the ifixes flagged for removal.
        # Per this function's contract, a failed ifix removal must be reported
        # with RC.FAILURE; we keep going so the remaining ifixes and the
        # installp reject below are still attempted.
        for ifix in ${_.ifix_label_to_remove}
        do
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 41 'Removing Ifix: %1$s on node: %2$s ...\n' "${ifix}" "${_.name}"
            cmd="/usr/sbin/emgr -r -L $ifix"
            _.clrsh_cmd output "$cmd"
            if (( $? != 0 ))
            then
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                rc=${RC.FAILURE}
            fi
        done

        # Then reject the applied (not committed) filesets, if any.
        if [[ -n ${_.applied_list} ]]
        then
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 38 'Rejecting applied updates on node: %1$s...\n' "${_.name}"
            cmd="/usr/sbin/installp -rgXJw ${_.applied_list}"
            _.clrsh_cmd output "$cmd"
            if (( $? == 0 ))
            then
                DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 39 'Succeeded to reject updates on node: %1$s.\n'\
                    "${_.name}"
            else
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 40 'ERROR: Failed to reject updates on node: %1$s.\n'\
                    "${_.name}"
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                rc=${RC.FAILURE}
            fi
        fi
        return $rc

    } # End of "reject()"
    
    
    #####################################################################
    #
    # NAME: commit
    #
    # FUNCTION:
    #   Commit already installed and applied lpp on the node that are also
    #   present on the repository.
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #           1: source repository (optional)
    #     global:
    #           Node_t
    #           repository (if not specified)
    #
    # RETURNS: (int)
    #     RC.FAILURE - error with commit
    #     RC.OK      - Success
    #
    # OUTPUT:
    #####################################################################
    function commit {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x

        # $1 : source repository (optional; global repository used otherwise)
        typeset repos=$1
        typeset output=""
        typeset -i rc=${RC.OK}
        # Single declaration of cmd (previously declared twice).
        typeset -h "command with parameter to install lpp_source"    cmd=""

        log_trace 5 "$0()[$LINENO]($SECONDS): Enter Node_t commit"

        ####################################################################################
        # installp flags used below:
        # -c   Commits all specified updates that are currently applied but not committed. When an update is committed all other software products it is dependent on must also be committed
        #      (unless they are already in the committed state). The specified software product is dependent on any software product that is a prerequisite or corequisite of the specified
        #       product. If the requisite software products are not in the committed state, the commit fails and error messages are displayed. The -g flag can be used to automatically commit
        #       requisite software product updates.
        # -J    (SMIT internal) used together with -w.
        # -w    Does not wildcard FilesetName. Use this flag from smit so it only installs the fileset chosen and not the filesets that match. For example, if you choose
        #       foo.rte, foo.rte.bar is not automatically pulled in, as it would be by default, without the -w flag.
        ####################################################################################

        # Commit the applied (not committed) filesets, if any.
        if [[ -n ${_.applied_list} ]]
        then
            DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 47 'Commiting applied updates on node: %1$s...\n' "${_.name}"
            cmd="/usr/sbin/installp -cJw ${_.applied_list}"
            _.clrsh_cmd output "$cmd"
            if (( $? == 0 ))
            then
                DSP_MSG ${MSG_TYPE.INF} $NODE_T_SET 48 'Succeeded to commit updates on node: %1$s.\n'\
                    "${_.name}"
            else
                # Use the node's own name here; the previous code referenced
                # an undefined variable ($node_name) and printed an empty name.
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 49 'ERROR: Failed to commit updates on node: %1$s.\n'\
                    "${_.name}"
                DSP_MSG ${MSG_TYPE.ERR} $NODE_T_SET 85 'Refer to the /var/hacmp/EZUpdate/EZUpdate.log file for more details.\n'
                rc=${RC.FAILURE}
            fi
        fi
        return $rc

    } # End of "commit()"
    
    #####################################################################
    #
    # NAME: cleanup
    #
    # FUNCTION:
    #   Cleanup the node
    #   - unmount the exported NIM resource mount point
    #
    # EXECUTION ENVIRONMENT:
    #
    # NOTES:
    #
    # RECOVERY OPERATION:
    #
    # DATA STRUCTURES:
    #     parameters:
    #     global:
    #           Node_t
    #
    # RETURNS: (int)
    #     RC.FAILURE - error
    #     RC.OK      - Success
    #
    #####################################################################
    function cleanup {
        [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
        [[ "$DEBUG_MODE" == "yes" ]] && set -x
        # Cleanup must not be interrupted mid-way.
        trap 'print -- "$CANNOT_INT_MSG"' INT

        typeset -i rc=${RC.OK}

        # Only the NIM update flow mounts an exported resource directory on
        # the node, so only unmount it in that case.
        # NOTE(review): $source and UPDATE_SOURCE are globals set by the
        # caller -- confirm they are always defined when cleanup() runs.
        if [[ -n ${_.ifix_list} && $source == ${UPDATE_SOURCE.NIM} ]]
        then
            # unmount the NIM resource directory
            _.umount_nim_res
            rc=$?
        fi

        return $rc
    } # End of "cleanup()"
)

# Guard flag so sourcing scripts can detect that Node_t is already defined.
export NODE_T_DEFINED=1


