#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2020,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# @(#)  afbc6c4 43haes/usr/sbin/cluster/events/utils/cl_deactivate_fs.sh, 726, 2147A_aha726, Jul 29 2021 07:02 PM
#
#   COMPONENT_NAME: EVENTUTILS
#
#   FUNCTIONS: none
#
#   ORIGINS: 27
#
###############################################################################
#
#  Name:  cl_deactivate_fs
#
#  Given a list of filesystems, unmount any that are currently mounted.
#
#  Returns:
#       0 - All filesystems were successfully unmounted
#       1 - One or more filesystems failed to unmount
#       2 - Zero arguments were passed
#
# Arguments: [ -c ] [ filesystem name .... ]
#
#   If called with '-c' flag, the script invokes the appropriate 
#   oem deactivate function as opposed to AIX umount.
#
#   If called with a list of filesystems, deactivate (unmount) those filesystems
#
#   If called with no list of filesystems, then process by resource group,
#   based on environment variables set by process_resources:
#    RESOURCE_GROUPS - space separated list of resource groups
#    FILE_SYSTEMS - list of filesystems for a resource group
#                 - comma separated list of filesystems within a resource group
#                 - colon separated lists of filesystems for each resource group
#    RECOVERY_METHODS - list of recovery methods, one per filesystem.
#                       Same pattern of comma and colon separated
#                       lists as for FILE_SYSTEMS.  Current implementation 
#                       is all filesystems in a resource group have the same
#                       recovery mechanism
#    e.g. -
#       RESOURCE_GROUPS="curley larry moe"
#       FILE_SYSTEMS="/able,/baker,/baker/charley:/lst,/lst/old,/lst/old/older:/zoo,/hold,/least"
#       RECOVERY_METHODS="sequential,sequential,sequential:parallel,parallel,parallel:sequential,sequential,sequential"
#
#  Environment: VERBOSE_LOGGING, PATH
#
###############################################################################


###############################################################################
#
#   Function:	    find_nested_mounts
#
#   Description:    Given a list of file systems, find any mounts below those
#		    file systems that are not in the list.
#
#		    This is done by finding all file systems in the output of
#		    the "mount" command that start with the given file system
#		    path name.
#		    
#		    The purpose of this routine is to avoid having unmounts 
#		    fail because some filesystems have been mounted outside of
#		    PowerHA control.
#		    
#   Input:	    A space separated list of file systems
#
#   Output:	    A space separated list of file systems written to stdout,
#		    containing the original list, plus any that were added.
#
#		    This list is sorted in reverse order, to give the order in
#		    which the file systems should be unmounted.
#
###############################################################################
function find_nested_mounts
{
    if [[ $VERBOSE_LOGGING == "high" ]]
    then
        PS4_FUNC='find_nested_mounts'
        set -x
    fi

    typeset given_fs_list="$1"          #   space separated mount point list

    typeset first second third fourth rest
    typeset mount_out=$(mount)          #   snapshot of current mount table
    typeset discovered_fs=""            #   nested mounts not in the given list
    typeset line fs nested_fs
    typeset mounted_fs_list
    integer fs_count=0

    for fs in $given_fs_list
    do
	#
	:   Collect every mount table line mentioning $fs.  More than one
	:   line means something is mounted at or below this mount point.
	#
	mounted_fs_list=$(print -- "$mount_out" | grep -w $fs)
	fs_count=$(print -- "$mounted_fs_list" | wc -l)
	if (( fs_count > 1 ))
	then
	    #	NB: in ksh93 the "while" stage of this pipeline runs in the
	    #	current shell, so updates to discovered_fs are retained
            echo "$mounted_fs_list" | while read line
	    do
		#
		:   A mount line takes one of two forms, depending on
		:   whether this is a local mount or an NFS mount
		#
		#   E.g.,
		#   node       mounted        mounted over    vfs       date        options
		# -------- ---------------  ---------------  ------ ------------ ---------------
		# ha15a2   /EFS_KS_JFS2     /var/efs         nfs4   Nov 22 10:27 vers=4,hard,intr
		#          /dev/lv03        /nfs_ss          jfs2   Nov 22 10:28 rw,log=/dev/loglv01
		#
		print "$line" | read first second third fourth rest
		nested_fs=""
		if [[ $second == ${fs}/* && $third == jfs* ]]
		then
		    #
		    :   Local mount form -
		    :   "lv_name"  "lower_mount_point" ...
		    :    $first     $second
		    #
		    nested_fs=$second
		elif [[ $third == ${fs}/* && $fourth == nfs* ]]
		then
		    #
		    :   NFS mount form -
		    :   "exporting_node" "exported_file_system" "lower_mount_point" "vfs"
		    :    $first           $second                $third             $fourth
		    #
		    nested_fs=$third
		fi

		if [[ -n $nested_fs ]]
		then
		    #
		    :   Record new nested file system $nested_fs
		    #
		    discovered_fs="$discovered_fs $nested_fs"
		fi
	    done
	fi
    done

    #
    :	Pass comprehensive list to stdout, sorted in reverse to give the
    :	correct unmount order
    #
    print -- "$given_fs_list" "$discovered_fs" | tr ' ' '\n' | sort -ru
}


###############################################################################
#
#   Function:	    parse_procfiles
#
#   Description:    Given a process id, find all the files held open by that
#		    process, and write the names to stdout.
#
#		    The purpose of this routine is to provide a reliable record
#		    of processes that were killed in order to make unmount
#		    possible.  A well-written application server would have 
#		    ensured, by completion of the stop_server method, that no
#		    processes were using the shared file system.  Alas, not 
#		    all application servers are well-written.
#
#   Input:	    Process ID
#
#   Output:	    List of files, as tracked by the 'procfiles' command, held
#		    open by the given process, written to stdout
#
###############################################################################
function parse_procfiles
{
#   Write to stdout the 'procfiles -n' report for process $1, attempting to
#   fill in file names that procfiles could not resolve.  Unresolved names
#   are recovered by mapping remembered device major/minor numbers to a
#   block device in /dev, that device to its mounted file system, and the
#   inode number to a file below that mount point.
[[ "$VERBOSE_LOGGING" == "high" ]] && set -x

typeset line
typeset first_line
typeset second_line
typeset major
typeset minor
typeset inode
typeset base
integer pid=$1

# LC_ALL=C keeps the procfiles output format predictable for parsing
LC_ALL=C procfiles -n $pid |
while IFS='' read line
do
    #
    :	First and second lines passed through unexamined
    :	since they do not contain file information
    #
    if [[ -z $first_line ]]
    then
	first_line="Done"
    elif [[ -z $second_line ]]
    then
	second_line="Done"
    else
	#
	:   Lines after second should contain information
	:   about files open in process $pid
	#
	if print -- "$line" | grep -q 'dev:'
	then
	    #
	    :	If the line contains device information, remember it, 
	    :	in case we need it to determine the file name 
	    #
	    # Extract "major minor inode" from the "dev:MAJ,MIN ino:INO" text
	    print -- "$line" | sed -n 's/^.* dev:\([0-9]*\),\(-*[0-9]*\) ino:\([0-9]*\) .*/\1 \2 \3/p' | read major minor inode
	else
	    #
	    :	Look for lines that procfiles could not 
	    :	turn into a file name
	    #
	    if print -- "$line" | grep -q 'name:Cannot be retrieved'
	    then
		#
		:   The procfiles output does not contain the file name.
		:   See if we can determine it from inode, major and minor
		:   number collected above.
		#
		#   If the file descriptor corresponds to an unnamed FIFO,
		#   then naturally no name will be found
		#
		#   NOTE(review): 65535, 268435455 and -1 look like "no
		#   device" sentinel values in procfiles output - confirm
		if [[ -n $major && -n $minor && -n $inode ]] && \
		    (( $major != 65535 && $major != 268435455 && $minor != 65535 && $minor != -1 ))
		then
		    #
		    :	Find the block device corresponding to the
		    :	extracted major and minor numbers
		    #
		    dev_line=$(ls -l /dev | grep -w "^b.*${major}, *${minor}" )
		    if [[ -n $dev_line ]]
		    then
			#
			:   Logical volume name is the last item on the line
			#
			lv_name=$(set -- $dev_line ; eval print \${$#} )
		    fi
		    if [[ -n $lv_name ]]
		    then
			#
			:   Find the file system mounted from the logical volume
			#
			mount | grep -w $lv_name | read skip base skip
			if [[ -n $base ]]
			then
			    #
			    :	Finally, find the file corresponding to the
			    :	extracted inode number
			    #
			    fname=$(find $base -xdev -inum $inode)
			fi
		    fi
		fi
		if [[ -n $fname ]]
		then 
		    #
		    :	If we got a file name, print it
		    #
		    line=$(print -- "$line" | sed "s#name:Cannot be retrieved#${fname}#")
		    fname=""
		fi
	    fi
	    #
	    :	Clean up to avoid accidental reuse
	    #
	    major=""
	    minor=""
	    inode=""
	fi
    fi
    #
    :	Print line from procfiles, possibly with file name updated
    #
    print -- "${line}"
done
}


###############################################################################
#
#  Name:  fs_umount
#
#  fs_umount will kill all the processes related to the file system
#  and unmount the file system.
#  status will be appended to the status file passed as a parameter
#
#  Returns:
#       0 - filesystem successfully unmounted
#       1 - filesystem failed to unmount or failed to get logical volume
#
#  Arguments:
#       $1 - File system to unmount
#       $2 - Program name
#       $3 - Status file
#
#  Environment: VERBOSE_LOGGING, PATH
#
# Questions? Comments? Expressions of Astonishment?   mailto:hafeedbk@us.ibm.com
#
###############################################################################

function fs_umount
{
    typeset PS4_TIMER="true"
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset FS="$1"
    typeset PROGNAME="$2"
    typeset TMP_FILENAME="$3"
    typeset WPAR_ROOT=$(clwparroot $GROUPNAME)
    integer STATUS=0
    typeset lv
    typeset fs_type
    typeset count
    typeset line
    integer RC=0
    typeset pid
    typeset pidlist
    typeset lv_lsfs
    typeset disable_procfile_debug="false"
    typeset crossmount_rg 

    #
    : Fetch filesystem type and unmount nfs filesystem
    #
    fs_type=$(mount | awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=$FS)
    if [[ $fs_type == nfs* ]]
    then
       #
       : unmount nfs filesystem
       #
       if ! umount $FS
       then
          #
          : failed to unmount of nfs filesystem $FS.. Try force unmount
          #
          if ! umount -f $FS
          then
             cl_log 24 "$PROGNAME: Failed to unmount NFS filesystem $FS .\n" $PROGNAME $FS
             STATUS=1    # note error

             #
             :   append status to the status file
             #
             print -- $STATUS $FS >>/tmp/$TMP_FILENAME
             cl_RMupdate resource_error $FS $PROGNAME
             return $STATUS
          fi
       fi

       #
       :    append status to the status file
       #
       print -- $STATUS $lv $FS >>/tmp/$TMP_FILENAME
       return $STATUS
    fi

    #
    :	Get the logical volume associated with the filesystem
    #
    if ! lv_lsfs=$(lsfs -c "$FS") 
    then
        cl_log 24 "$PROGNAME: Failed obtaining logical volume for $FS from ODM.\n" $PROGNAME $FS
        STATUS=1    # note error
        cl_RMupdate resource_error $FS $PROGNAME

        #
        :   append status to the status file
        #
        print -- $STATUS $FS >>/tmp/$TMP_FILENAME 
	cl_RMupdate resource_error $FS $PROGNAME
        return $STATUS
    fi

    #
    :	Get the logical volume name and filesystem type
    #
    #   lsfs -c output is colon separated; field 1 (mount point) is skipped,
    #   field 2 is the device/logical volume, field 3 the vfs type
    print "$lv_lsfs" | tail -1 | IFS=: read skip lv fs_type rest

    #
    :    For WPARs, find the real file system name
    #
    [[ -n "$WPAR_ROOT" ]] && FS=${WPAR_ROOT}${FS}

    #
    :    Check to see if filesystem is mounted.
    #
    FS_MOUNTED=$(LC_ALL=C mount | awk '{ if ( $1 == "'$lv'" ) print $2 }' )
    if [[ -n "$FS_MOUNTED" ]]
    then
        if [[ "$FS" != "$FS_MOUNTED" ]]
        then
            #
            :	unmount $FS currently mounted for a given logical volume $lv
            # Make sure that we unmount the right file system i.e currently
            # mounted file system for a given logical volume. The idea is to
            # fetch the mounted file system from 'mount' command. Note that
            # the mount point may have been changed in between and hence it
            # is appropriate to unmount the one which is actually mounted.
            #
            FS=$FS_MOUNTED
        fi
	if [[ $FS == "/" || $FS == "/usr" || $FS == "/dev" || $FS == "/proc" || $FS == "/var" ]]
	then
	    #
	    :   Last ditch sanity check.  If some munging of ODM is about to lead us to kill
	    :   all the processes that use critical system file systems, lets walk back from
	    :   that edge.
	    #
	    cl_log 999999 "${PROGNAME}: Cannot safely unmount logical volume $lv on mount point $FS" $PROGNAME $lv $FS
	    cl_echo 25 "$PROGNAME: Failed umount of $FS.\n" $PROGNAME $FS
	    STATUS=1			#   note error
	    print -- $STATUS $lv $FS >>/tmp/$TMP_FILENAME 
	    cl_RMupdate rg_error $GROUPNAME $PROGNAME
	    return $STATUS              #   Bail, and head for an event error
	fi
    
        # Format for consumption by cl_am utility
        amlog_trace $AM_FS_UMOUNT_BEGIN "Deactivating Filesystem|$FS"
	#
	:   Try up to $LIMIT times to unmount $FS
	#
        for (( count=1 ; count <= $LIMIT ; count++ )) 
        do
            #
            :    Attempt $count of $LIMIT to unmount at $(date "+%h %d %H:%M:%S.000")
            #
            if umount $FS 
            then
                #
                :   Unmount of $FS worked.  Can stop now.
                #
                break
            else
                #
                :   At this point, unmount of $FS has not worked.  Attempt a SIGKILL to
                :   all processes having open file descriptors on this LV and FS.
                #
                date "+%h %d %H:%M:%S.000"

                #
                #   procfiles command can hang, when NFS crossmounts are
                #   configured and FS_BEFORE_IPADDR is enabled. checking 
                #   and avoid calling procfiles in this scenario...
                #

                crossmount_rg=$(clodmget -n -q "name=MOUNT_FILESYSTEM" -f group  HACMPresource)

                for rg in $crossmount_rg
                do
                  if [[ $(clodmget -n -f value -q "group=$rg and name=FS_BEFORE_IPADDR" HACMPresource) == "true" ]]
                  then
                      disable_procfile_debug="true"
                      break
                  fi
                done

                #  when PowerHA applications are running on external nfs file system and
                #  this is not configured under PowerHA to avoid hang
                #  disable calling the parse_procfiles.

                LC_ALL=C mount | awk '$4~ /nfs/ { print $4} '  | grep -iq "nfs"
                if (( $? == 0 )) ; then
                    disable_procfile_debug="true"
                fi


		#
		:   Record the open files on $lv and $FS, and the processes that we are
		:   about to kill.
		#
		pidlist=$(fuser $O_FLAG -u -x $lv 2>/dev/null)
		for pid in $pidlist
		do
		    #
		    :   Process $pid has open files on $FS.  Record information about this
		    :   process in case anyone is later suprised by this action.
		    #
		    ps ewwww $pid
		    if [[ $disable_procfile_debug != "true" ]]
		    then
                        : show files being used by $pid: Run as a coprocess, in the background
                        #   '|&' starts a ksh coprocess; its output is read back below via '<&p'
                        parse_procfiles $pid 2>&1 |&
                        # The value for DELAY of 15 seconds is an educated guess,
                        # but the debug trace will show the total runtime so we
                        # can adjust this estimate in the future
                        typeset DELAY=15
                        typeset bg_pid=$!

                        #
                        : SECONDS is a default shell veriable and its current value is $SECONDS
                        : Wait up to "$DELAY" seconds for process to complete
                        #
                        ((END=SECONDS+DELAY))
                        #The "while" loop sends all output to /dev/null to avoid spraying the logfile with unneeded tracing.
                        while ((END>SECONDS)); do
                          ps $bg_pid || break
                          sleep 1
                        done >/dev/null 2>&1

                        : Make sure the process is ended
                        #   $! still refers to the coprocess started above
                        ps $bg_pid && kill -9 $!
                        RUNTIME=$(((SECONDS-END+DELAY)))
                        #
                        : Total runtime of procfiles RUNTIME=$RUNTIME
                        : Show the procfiles output, or flush it if the process is already gone.
                        #
                        ps $bg_pid >/dev/null && cat <&p >/dev/null || cat <&p
		    fi
		done
		fuser $O_FLAG -k -u -x $lv	    #	Kill everything that has the logical volume open
		fuser $O_FLAG -k -u -x -c $FS	    #	Kill everything that has the file system open
		date "+%h %d %H:%M:%S.000"

		#
		:   Wait $SLEEP seconds for the kills to be effective
		#
		if [[ -n $pidlist ]]
		then
		    sleep $SLEEP
		fi
		if umount $FS
		then
		    #
		    :   Unmount of $FS worked.  Can stop now.
		    #
		    break
		fi

		if (( count == 1 ))
		then
		    #
		    :   At this point, asking politely has not worked.  Check to see if there is
		    :   something wrong with the volume group.  If so, abort the retries and go
		    :   directly to a forced unmount.
		    #
		    #	Check only done on first pass through loop.
		    #
		    lvn=${lv##*/}					    #	Trim any '/dev'
		    vg_name=$(clodmget -q "name = $lvn" -f parent -n CuDv)  #	Get owning volume group
		    if [[ -n $vg_name ]] &&
		        lsvg -o | grep -qx $vg_name &&
		        ! cl_vgsa_onetime $vg_name >/dev/null 2>&1
		    then
			#
			:   Volume group $vg_name holding $FS is online,
			:   but not readable.  This bodes ill for umount.
			#
			count=$LIMIT					    #	Go straight to force
		    fi
		fi

		if (( count >= LIMIT-3 ))
		then
		    #
		    :   Normal unmount of $FS failed.  If the force option can be used, try it here.
		    #
		    #	Forced unmount is only supported for jfs2, and only when the
		    #	installed level was found adequate (FORCE_OK set in mainline)
		    if [[ -n $FORCE_OK && "jfs2" == $fs_type ]]
		    then
			if umount -f $FS
			then
			    #
			    :   Forced option of unmount $FS worked
			    #
			    break
			fi
			#
			:    Even a forced unmount of $FS failed...
			#
		    fi
                fi
            fi

            if (( count == LIMIT/3 ))
            then
                #
                :   Nothing has worked so far, check NFS
                #
                if [[ ! -f /tmp/.RPCLOCKDSTOPPED ]]
                then
                    #
                    : It is possible the unmount failure is due to an NFS file lock.
                    #
                    if [[ -n $(odmget -q "name=EXPORT_FILESYSTEM and group=$GROUPNAME and value=$FS" HACMPresource) ]]
                    then
                        #
                        :   If the NFS file lock daemon has not been stopped already
                        :   and this FS is used for NFS exports, try stopping the lock daemon
                        #
                        stopsrc -s rpc.lockd
                        integer rcstopsrc=$?
                        if (( $rcstopsrc != 0 ))
                        then
                            : rc_stopsrc.rpc.lockd == $rcstopsrc
                        fi

                        #
                        : Using lssrc to detect process existence.
                        #
                        for (( LSSRC_COUNT=0; LSSRC_COUNT<$LIMIT; LSSRC_COUNT++ ))
                        do
                            LC_ALL=C lssrc -s rpc.lockd | tail -1 | read name subsystem pid state
                            if [[ ! -z $state ]]
                            then
                                sleep 1
                            else
                                break
                            fi
                        done

                        if [[ ! -z $state ]]
                        then
			    #
                            : Friendly stop has not worked. Try a more forceful method.
			    #
                            stopsrc -cs rpc.lockd

                            for (( LSSRC_COUNT=0; LSSRC_COUNT<15; LSSRC_COUNT++ ))
                            do
                                LC_ALL=C lssrc -s rpc.lockd | tail -1 | read name subsystem pid state
                                if [[ ! -z $state ]]
                                then
                                    sleep 1
                                else
                                    break
                                fi
                            done
                        fi

                        if [[ -z $state ]]
                        then
			    #
                            : Note that the lock daemon has been stopped.
			    #
                            touch /tmp/.RPCLOCKDSTOPPED
                        fi
                    fi
                fi
            fi

            if (( count == LIMIT ))
            then
                #
                :   Out of retries to unmount $FS.  Game over, man, game over.
                #
                cl_log 25 "$PROGNAME: Failed umount of $FS.\n" $PROGNAME $FS
                cl_RMupdate resource_error $FS $PROGNAME
		#
		#   Intent here is to force an event error, to prevent resource
		#   group movement to another node, and subsequent double mount
		#   of a filesystem.  However, if this is the last node to go 
		#   down, do not care so much.  This test could conceivably be
		#   made tighter, so that an event error is thrown only if the
		#   resource group is going to be moved to another node.
		#
		if [[ -n $POST_EVENT_MEMBERSHIP ]]
		then
		    STATUS=1
		    cl_RMupdate rg_error $GROUPNAME $PROGNAME
		else
		    #	11 = resource error only; not escalated to a resource
		    #	group error because no other node remains active
		    STATUS=11
		fi
            fi
        done
        # Format for consumption by cl_am utility
        if [[ $STATUS != 0 ]]
        then
            amlog_err $AM_FS_UMOUNT_FAILURE "Deactivating Filesystem|$FS"
        else
            amlog_trace $AM_FS_UMOUNT_END "Deactivating Filesystem|$FS"
        fi
    else
	#
        :   File system $FS is not mounted
	#
    fi

    #
    :    append status to the status file
    #
    print -- $STATUS $lv $FS >>/tmp/$TMP_FILENAME
    
    
    
    return $STATUS
}


###############################################################################
#
#   Process resource list
#
###############################################################################
function deactivate_fs_process_resources
{
    #   process_resources entry point: unmount the file systems of every
    #   resource group in $RESOURCE_GROUPS, driven by the environment set
    #   by process_resources (FILE_SYSTEMS and RECOVERY_METHODS are colon
    #   separated per resource group, comma separated within a group).
    typeset PS4_LOOP=""
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    integer STATUS=0

    #
    : for the temp file, just take the first rg name
    #
    print $RESOURCE_GROUPS | cut -f 1 -d ' ' | read RES_GRP
    TMP_FILENAME="$RES_GRP""_deactivate_fs.tmp"

    #
    : Remove the status file if already exists
    #
    rm -f /tmp/$TMP_FILENAME 

    #
    : go through all resource groups
    #
    pid_list=""
    for GROUPNAME in $RESOURCE_GROUPS ; do
        
        export GROUPNAME
        export RECOVERY_METHOD

        #
        :   Get a reverse sorted list of the filesystems in this RG so that they
        :   release in opposite order of mounting. This is needed for nested mounts.
        #
        #   The IFS=: read peels the first colon separated group off the front
        #   of FILE_SYSTEMS, leaving the remainder for the next iteration.
        print $FILE_SYSTEMS | IFS=: read LIST_OF_FILE_SYSTEMS_FOR_RG FILE_SYSTEMS
        LIST_OF_FILE_SYSTEMS_FOR_RG=$(print $LIST_OF_FILE_SYSTEMS_FOR_RG | tr ',' '\n' | sort -ru)
	LIST_OF_FILE_SYSTEMS_FOR_RG=$(find_nested_mounts "$LIST_OF_FILE_SYSTEMS_FOR_RG")

        #
        :   Get the recovery method used for all filesystems in this resource group
        #
        #   Same peel-off pattern as FILE_SYSTEMS above.  Only the first comma
        #   separated entry is used, since all file systems in a resource
        #   group share one recovery method.
        print $RECOVERY_METHODS | IFS=: read RECOVERY_METHOD RECOVERY_METHODS
        RECOVERY_METHOD=$(print $RECOVERY_METHOD | cut -f 1 -d ',')

        #
        :   verify the recovery method
        #
        RECOVERY_METHOD=${RECOVERY_METHOD# *}    # trim leading
        RECOVERY_METHOD=${RECOVERY_METHOD% *}    # and trailing blanks
    
        #   Anything unrecognized falls back to the conservative default
        if [[ $RECOVERY_METHOD != "sequential" && $RECOVERY_METHOD != "parallel" ]]
        then
            cl_echo 95 "ERROR: Could not get the value of RECOVERY_METHOD" "RECOVERY_METHOD"
            RECOVERY_METHOD="sequential"
        fi

        #
        :   Tell the cluster manager what we are going to do
        #
        ALLFS="All_filesystems"
        cl_RMupdate resource_releasing $ALLFS $PROGNAME

        #
        : now that all variables are set, perform the umounts
        #
        #   "parallel" starts one fs_umount per file system in the background
        #   and collects its pid; "sequential" runs them one at a time
        for fs in $LIST_OF_FILE_SYSTEMS_FOR_RG
        do
            PS4_LOOP="$fs"
            if [[ $RECOVERY_METHOD == "parallel" ]]
            then
                fs_umount $fs $PROGNAME $TMP_FILENAME &
                pid_list="$pid_list $!"
            else
                fs_umount $fs $PROGNAME $TMP_FILENAME 
            fi
        done
        unset PS4_LOOP

    done # endof for GROUPNAME

    if [[ -n $pid_list ]]
    then
	#
	: wait to sync all the processes.
	#
        wait $pid_list
    fi
    
    for GROUPNAME in $RESOURCE_GROUPS ; do
        ALLNOERROR="All_non_error_filesystems"
        #
        :   update resource manager
        #
        cl_RMupdate resource_down $ALLNOERROR $PROGNAME
    done

    #
    :    Check to see how the unmounts went
    #
    #   Each fs_umount appended "<status> <lv> <fs>"; status 1 means an
    #   unmount failed (resource group error), 11 means resource error only.
    #   grep -w keeps "^1" from also matching the "11" lines.
    if [[ -s /tmp/$TMP_FILENAME ]]
    then
        if grep -qw "^1" /tmp/$TMP_FILENAME
        then
            #
            :    At least one unmount failed, causing a resource group problem
            #
            STATUS=1
            cl_RMupdate rg_error $RES_GRP $PROGNAME
        elif grep -qw "^11" /tmp/$TMP_FILENAME
        then
            #
            :    Only have resource problems
            #
            STATUS=11
            cl_RMupdate rg_error $RES_GRP $PROGNAME
        else
            #
            :    All unmounts successful
            #
            STATUS=0
            rm -f /tmp/$TMP_FILENAME
        fi
    fi

    return $STATUS
}


###############################################################################
#
#   OEM filesystem deactivation
#
###############################################################################
function deactivate_oem_fs
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    FS_LIST=$1
    integer RT=0

    for fs in $FS_LIST ; do
        #
        :   Look up the OEM type of $fs and its matching OFFLINE method
        #
        OEM_TYPE=$(cl_get_oem_type -f $fs)
        OEM_METHOD_TO_DEAACTIVATE_FS=$(cl_get_oem_method -m "OFFLINE" -t $OEM_TYPE)

        #
        :   Invoke the OEM offline method and record its result in the
        :   status file
        #
        $OEM_METHOD_TO_DEAACTIVATE_FS "$fs"
        RT=$?
        print -- $RT $fs >>/tmp/$TMP_FILENAME
    done

    #   Return code reflects the last file system processed
    return $RT
}


###############################################################################
#
# Start of main
#
###############################################################################

PROGNAME=${0##*/}

# Pull in the availability metrics library (amlog_* functions)
. /usr/es/lib/ksh93/availability/cl_amlib

if [[ $VERBOSE_LOGGING == high ]]
then
    set -x
    version='1.6'
fi

integer STATUS=0
integer SLEEP=1     # seconds to wait after killing users of a busy file system
integer LIMIT=60    # maximum number of unmount attempts
export SLEEP LIMIT
TMP_FILENAME="_deactivate_fs.tmp"

#
:   A leading '-c' flag requests OEM deactivation methods instead of
:   AIX umount
#
OEM_CALL="false"
if (( $# != 0 )) && [[ $1 == "-c" ]]
then
    OEM_CALL="true"
    shift
fi

#
:   Check here to see if the forced unmount option can be used
#
export FORCE_OK=""
export O_FLAG=""    # Fix: was exported as "O_FlAG" (typo).  The name actually
                    # referenced is O_FLAG (set below, and used by fs_umount in
                    # its fuser calls); leaving it unset would trip 'set -u'
                    # later whenever fuser does not support '-O'.

#
:   Each of the V, R, M and F fields are padded to fixed length,
:   to allow reliable comparisons.  E.g., maximum VRMF is
:   99.99.999.999
#
integer V R M F
typeset -Z2 R                       # two digit release
typeset -Z3 M                       # three digit modification
typeset -Z3 F                       # three digit fix
integer jfs2_lvl=601002000          # minimum JFS2 level needed for forced unmount
integer fuser_lvl=601004000	    # Level of fuser that supports '-O'
integer VRMF=0

#
:   Here try and figure out what level of JFS2 is installed
#
#   lslpp -lcqOr gives colon separated fileset data; field 3 is the
#   V.R.M.F level, split here into its four components
lslpp -lcqOr bos.rte.filesystem | cut -f3 -d':' | IFS=. read V R M F
VRMF=$V$R$M$F                       # get the JFS2 level

if (( $VRMF >= $jfs2_lvl ))
then
    #
    :    JFS2 at this level that supports forced unmount
    #
    FORCE_OK="true"
fi

if (( $VRMF >= $fuser_lvl ))
then
    #
    :	fuser at this level supports the '-O' flag
    #
    O_FLAG="-O"
fi

#
:   When JOB_TYPE is set and is not "GROUP", process_resources invoked
:   this script - handle everything through the environment it set up
#
if [[ ${JOB_TYPE:-0} != 0 ]] && [[ $JOB_TYPE != "GROUP" ]]
then
    deactivate_fs_process_resources
    exit $?
fi

#
:   Called directly, so an explicit list of file systems is required
#
if (( $# == 0 ))
then
    cl_echo 26 "usage: $PROGNAME filesystems_to_unmount\n" $PROGNAME
    exit 2
fi

#
:   At this point, we have an explicit list of filesystems to unmount
#

#
:   Get the resource group name from the environment
#
RES_GRP="$GROUPNAME"
TMP_FILENAME="$RES_GRP""_deactivate_fs.tmp"

#
:   Remove the status file if already exists
#
rm -f /tmp/$TMP_FILENAME 

#
:   if RECOVERY_METHOD is null get from ODM
#
#   RECOVERY_METHOD may already have been supplied through the environment
#   by the caller; the ODM is consulted only when it was not
if [[ -z $RECOVERY_METHOD ]]
then
    RECOVERY_METHOD=$(clodmget -q "name=RECOVERY_METHOD AND group=$RES_GRP" -f value -n HACMPresource)
fi

#
:   verify the recovery method
#
RECOVERY_METHOD=${RECOVERY_METHOD# *}    # trim leading
RECOVERY_METHOD=${RECOVERY_METHOD% *}    # and trailing blanks

#   Anything other than the two recognized values falls back to the
#   conservative default, "sequential"
if [[ $RECOVERY_METHOD != "sequential" && $RECOVERY_METHOD != "parallel" ]]
then
    cl_echo 95 "ERROR: Could not get the value of RECOVERY_METHOD" "RECOVERY_METHOD"
    RECOVERY_METHOD="sequential"
fi

# set -u will report an error if any flag used in the script is not set
#   NOTE: deliberately enabled only after RECOVERY_METHOD has been defaulted
#   above; EXPORT_FILESYSTEM and EXPORT_FILESYSTEM_V4 below are expected to
#   be set (possibly empty) in the environment
set -u

#
:   Are there any exports?
#
if [[ -n $EXPORT_FILESYSTEM || -n $EXPORT_FILESYSTEM_V4 ]]
then
    #
    :    Remove the NFS exports before unmounting filesystems
    #
    cl_unexport_fs "$EXPORT_FILESYSTEM" "$EXPORT_FILESYSTEM_V4"
fi

#
:   Reverse the order of the FS list, to unmount in the opposite
:   order from mounting.  Important for nested mounts.
#
FILELIST=$(print $* | tr ' ' '\n' | /bin/sort -r)

#
:   update resource manager - file systems being released
#
ALLFS="All_filesystems"
cl_RMupdate resource_releasing $ALLFS $PROGNAME
pid_list=""

#
:   Check for nested mounts not under our control
#
FILELIST=$(find_nested_mounts "$FILELIST")

if [[ "$OEM_CALL" == "true" ]]
then
    deactivate_oem_fs "$FILELIST"
else
    #
    :	Loop through and unmount all file systems
    #
    #   "parallel" starts one fs_umount per file system in the background
    #   and collects its pid; "sequential" runs them one at a time in order
    for fs in $FILELIST
    do
        if [[ $RECOVERY_METHOD == "parallel" ]]
        then
            fs_umount $fs $PROGNAME $TMP_FILENAME &
            pid_list="$pid_list $!"
        else
            fs_umount $fs $PROGNAME $TMP_FILENAME 
        fi
    done
fi

if [[ -n $pid_list ]]
then
    #
    :   wait to sync all the processes.
    #
    wait $pid_list
fi

#
:   update resource manager - file systems released
#
ALLNOERROR="All_non_error_filesystems"
cl_RMupdate resource_down $ALLNOERROR $PROGNAME

#
:    Check to see how the unmounts went
#
#   Each fs_umount appended "<status> <lv> <fs>" to the status file;
#   status 1 means an unmount failed (resource group error), 11 means a
#   resource error only.  grep -w keeps "^1" from matching the "11" lines.
if [[ -s /tmp/$TMP_FILENAME ]]
then
    #   NOTE(review): clodmget here appears to yield the location of the
    #   hacmp.out log, preserving the status file for problem determination
    #   - confirm against HACMPlogs ODM contents
    cp /tmp/$TMP_FILENAME $(clodmget -f value -n -q "name = hacmp.out" HACMPlogs)
    if grep -qw "^1" /tmp/$TMP_FILENAME
    then
        #
        :   At least one unmount failed, causing a resource group problem
        #
        STATUS=1
        cl_RMupdate rg_error $RES_GRP $PROGNAME
    elif grep -qw "^11" /tmp/$TMP_FILENAME
    then
        #
        :    Only have resource problems
        #
        STATUS=11
        cl_RMupdate rg_error $RES_GRP $PROGNAME
    else
        #
        :   All unmounts successful
        #
        STATUS=0
    fi
    rm -f /tmp/$TMP_FILENAME
fi

exit $STATUS
