#!/bin/ksh93
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/events/get_disk_vg_fs.sh 1.98 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1990,2014 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)72  1.98  src/43haes/usr/sbin/cluster/events/get_disk_vg_fs.sh, hacmp.events, 61haes_r714, 1415A_hacmp714 2/10/14 21:30:07
#########################################################################
#                                                                       #
#       Name:           get_disk_vg_fs                                  #
#                                                                       #
#       Component:      hacmp.events                                    #
#                                                                       #
#       Description:    This script makes disk(s) available, varies   	#
#                       on volume group(s), and mounts filesystem(s).	#
#			If only given filesystem(s), the script	        #
#			figures out the associated disk(s) and volume   #
#			group(s) automatically. If only	given volume	#
#			group(s), the script figures out the	        #
#			associated disk(s) automatically.  And given no #
#                       input, the script does nothing.                 # 
#                       						#
#                       						#
#	Called by:	node_up_local, node_down_remote			#
#									#
#	Calls to:	cl_fs2disk, cl_disk_available, cl_activate_vgs,	#
#			cl_activate_fs					#
#									#
#       Arguments:      filesystem(s) volume-group(s) pvid(s)           #
#                                                                       #
#                       Each of these is a quoted string that may       #
#                       contain multiple sets of blank delimited names. #
#                       The string must be present, but may be null.    #
#                       E.g.,                                           #
#                                                                       #
#                       get_disk_vg_fs "/foo /bar" "vg01 vg02" ""       #
#                                                                       #
#       Returns:        0       success                                 #
#                       1       failure                                 #
#			2	bad argument				#
#                                                                       #
#########################################################################

get_mntpt_for_lv () {
    # Print the mount point configured for logical volume $1, as reported
    # by "lsfs -c" (colon-separated, mount point in the first field).
    # All diagnostics from the pipeline are suppressed; output is empty
    # when the logical volume has no file system entry.
    typeset lv_device="/dev/${1}"
    { lsfs -c "$lv_device" | tail -n 1 | cut -d':' -f1 ; } 2>/dev/null
}

#########################################################################
#
:   Main Starts Here
#
#########################################################################

# Basename of this script, used in progress and error messages
PROGNAME=${0##*/}
# Standard cluster utility PATH
export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
if [[ $VERBOSE_LOGGING == "high" ]]
then
    set -x
    version='1.98 $Source$'
fi

# Source in acquire_udresources, called below to bring user defined
# resources online.  NOTE(review): GROUPNAME and PRINCIPAL_ACTION are
# assumed to be inherited from the event environment -- confirm against
# the callers (node_up_local, node_down_remote).
. /usr/es/sbin/cluster/events/reconfig_udresources

STATUS=0			# cumulative exit status of this script

FILE_SYSTEMS=$1			# file systems in the resource group
VOLUME_GROUPS=$2		# volume groups in the resource group
PVID_LIST=$3			# physical volumes in the resource group
OEM_FILE_SYSTEMS=$4             # The OEM filesystems for the group
OEM_VOLUME_GROUPS=$5            # The OEM volume groups
HDISK_LIST=""			# hdisks containing all of the above
UNMOUNTED_FS=""			# file systems needing mounting
OEM_UNMOUNTED_FS="" 		# OEM file systems needing mounting
INACTIVE_VGS=""			# volume groups to be vary'd on
sddsrv_off=FALSE		# flag turned off sddsrv daemon 
integer SKIPBRKRES=0		# RR method said to skip breaking reserves
integer SKIPVARYON=0		# We should not do the varyon if 1
integer DEF_VARYON_ACTION=0     # RR method said override def varyon action
HACMPREPRESOURCE_ODM=$(odmget HACMPrepresource 2>/dev/null) # does not exist in all env.

if [[ -n $FILE_SYSTEMS ]]     # If file systems were specified
then
    #
    :	If no volume group names were passed in, retrieve them from
    :	the resource group defintion
    #
    if [[ -z $VOLUME_GROUPS ]]  # But volume groups were not specified
    then
	# All *VOLUME_GROUP resource entries defined for this resource group
	VOLUME_GROUPS=$(clodmget -q"group=$GROUPNAME AND name like '*VOLUME_GROUP'" -f value -n HACMPresource)
    fi

    # 
    : If filesystems are given and not already mounted,
    : figure out the associated volume groups
    #
    if [[ $FILE_SYSTEMS != "ALL" ]]	# File systems listed, not just "ALL" 
    then
	# Collect mount points of all currently mounted jfs/jfs2 file
	# systems ($3 of mount output is the vfs type, $2 the mount point)
	mount 2>/tmp/mount.err | awk '$3 ~ /jfs2*$/ {print $2}' > /tmp/mount1.out.$$ 
	for fs in $FILE_SYSTEMS 
	do
	    if grep -qx $fs /tmp/mount1.out.$$
	    then
		#
		:   This one is already mounted, skip it
		#
		continue
	    else
		# cl_fs2disk -v maps a file system to its volume group
		VOLUME_GROUPS="$VOLUME_GROUPS $(cl_fs2disk -v $fs)"
		UNMOUNTED_FS="$UNMOUNTED_FS $fs"
	    fi
	done

	if [[ -s /tmp/mount.err ]]
	then
	    #
	    :	Print any errors from the mount command
	    #
	    print -u2 "stderr: mount"
	    cat >&2 /tmp/mount.err
	fi
    fi
fi

#
:   Process OEM Volume Groups in a similar manner
#
if [[ -n "$OEM_FILE_SYSTEMS" && "$OEM_FILE_SYSTEMS" != "ALL" ]]
then
    #
    :	Determine list of OEM volume groups from OEM Filesystems
    #
    for OEM_FILE_SYSTEM in $OEM_FILE_SYSTEMS 
    do
	#
	:   Determine the associated volume groups.  To do so,
	:   Get the FS OEM Type, and then the method to list host VGs
	#
	# Reset per iteration so a stale method from the previous
	# filesystem is never reused when lookup fails for this one
	OEM_FS2VG_METHOD=""
	OEM_FS_TYPE=$(cl_get_oem_type -f $OEM_FILE_SYSTEM);
        if [[ -z $OEM_FS_TYPE ]] 
	then
            echo "ERROR: Unable to get OEM Filesystem Type for $OEM_FILE_SYSTEM"
	    STATUS=1
	else
	    OEM_FS2VG_METHOD=$(cl_get_oem_method -t $OEM_FS_TYPE -m LSVOLUMES);
            if [[ -z $OEM_FS2VG_METHOD ]] 
	    then
                echo "ERROR: Unable to get OEM Method to list host volume group for filesystem $OEM_FILE_SYSTEM"
	        STATUS=1
	    fi
	fi

	#
	:   Only invoke the method if one was found.  Invoking an empty
	:   method name would execute the filesystem path itself as a
	:   command.  Accumulate the results across all OEM filesystems
	:   rather than keeping just the last one.
	#
	if [[ -n $OEM_FS2VG_METHOD ]]
	then
	    OEM_VGS_THIS_FS=$($OEM_FS2VG_METHOD $OEM_FILE_SYSTEM);
	    if [[ -z $OEM_VGS_THIS_FS ]]
	    then
		echo "ERROR: Unable to get OEM host volume group for filesystem $OEM_FILE_SYSTEM"
		STATUS=1
	    else
		OEM_VOLUME_GROUPS_FROM_FILESYSTEMS="$OEM_VOLUME_GROUPS_FROM_FILESYSTEMS $OEM_VGS_THIS_FS"
	    fi
	fi
    done
fi

#
:   Process volume groups
#
if [[ -n $VOLUME_GROUPS ]] 
then
    # Snapshot the volume groups that are currently varied on
    lsvg -L -o > /tmp/lsvg.out.$$ 2> /tmp/lsvg.err
    for vg in $VOLUME_GROUPS
    do
	#
	:   Check to see if the volume group is both vary\'d on, and readable
	:   by LVM - e.g., not closed due to lack of quorum.
	#
	if grep -qx $vg /tmp/lsvg.out.$$ &&
	   lqueryvg -g $(getlvodm -v $vg) >/dev/null 2>&1
	then
	    #
	    :	This one is already varied on, skip it
	    #
	    continue
	elif LC_ALL=C lsvg -L $vg 2>/dev/null | grep -i -q 'passive-only'
	then
	    #
	    :	This one is already varied on in passive mode.  The disks are
	    :	already locally accessable, so do not need to have reserves
	    :	broken.  However, the volume group does have to be moved to
	    :	the active state, so add it to the list of inactive ones to
	    :	activate.
	    #
	    INACTIVE_VGS="$INACTIVE_VGS $vg"
	else
	    #
	    :	Append to the previous HDISK_LIST list.
	    #
	    # cl_fs2disk -pg lists the physical volumes behind this volume
	    # group; those disks may need reserves broken before varyon
	    HDISK_LIST="$HDISK_LIST $(cl_fs2disk -pg $vg)"
	    INACTIVE_VGS="$INACTIVE_VGS $vg"
	fi
    done

    if [[ -s /tmp/lsvg.err ]]
    then
	#
	:   Print any lsvg error messages
	#
	print -u2 "stderr: lsvg -o"
	cat /tmp/lsvg.err >&2
    fi

    #
    :	Remove any duplicates from the list of volume groups to vary on
    #
    INACTIVE_VGS=$(echo $INACTIVE_VGS | tr ' ' '\n' | sort -u)
fi


#
:   Get OEM Volume groups that are not already active
#
ALL_OEM_VOLUME_GROUPS="$OEM_VOLUME_GROUPS_FROM_FILESYSTEMS $OEM_VOLUME_GROUPS"

if [[ -n "$ALL_OEM_VOLUME_GROUPS" ]] 
then

    for vg in $ALL_OEM_VOLUME_GROUPS
    do
	#
        :   get OEM type
	#
        OEM_TYPE=$(cl_get_oem_type -v $vg);
        if [[ -n "$OEM_TYPE" ]]
        then
	    #
            :	Get OEM method to determine VG status
	    #
            OEM_VG_STATUS=$(cl_get_oem_method -m "STATUS" -t "$OEM_TYPE");
	
	    #
	    :	Check status of OEM VG
	    #
	    if [[ -n $OEM_VG_STATUS ]]
	    then
                ($OEM_VG_STATUS $vg)
		VG_STATUS=$?
            else
                echo "ERROR: Unable to get OEM status for $vg."
	    fi

	    #
	    :	If inactive, add to list
	    #
	    if (( $VG_STATUS == 0 ))
	    then
                OEM_INACTIVE_VGS="$OEM_INACTIVE_VGS $vg"
	    fi
	    
	    #
	    :	Get method to list HDISKS hosting the Volume Group
	    #
	    OEM_LIST_HDISKS=$(cl_get_oem_method -m "LSHDISKS" -t "$OEM_TYPE");
	    if [[ -n $OEM_LIST_HDISKS  ]]
	    then
		#
	        :   Get HDISKS
		#
	        OEM_HDISK=$($OEM_LIST_HDISKS);
		#
	        :   Add to list of HDISKS to process later
		#
	        HDISK_LIST="$HDISK_LIST $OEM_HDISK"
            else
                echo "ERROR: Unable to get OEM method to list host disks for $vg."
	    fi
        else
            echo "ERROR: Unable to get OEM type for $vg."
            STATUS=1
        fi
    done

    #
    :	Remove any duplicates from the list of volume groups to vary on
    #
    OEM_INACTIVE_VGS=$(echo $OEM_INACTIVE_VGS | tr ' ' '\n' | sort -u);
fi


#
:   Call replicated resource predisk-available method associated
:   with any replicated resource defined in the resource group
:   we are currently processing. 
#
if [[ -n $INACTIVE_VGS && -n $(odmget HACMPrresmethods) || -n $HACMPREPRESOURCE_ODM ]]
then
    #
    :   clsetrepenv utility sets up the environment for replicated methods.
    #
    set -a
    eval $(clsetrepenv $GROUPNAME)
    set +a
    METHODS=$(cl_rrmethods2call predisk_available)

    for method in $METHODS
    do
	if [[ -x $method ]]
	then
	    $method $INACTIVE_VGS
	    
	    # Method return codes: 0 = proceed normally; 3 = skip
	    # breaking reserves; 4 = skip forced varyon; anything
	    # else is fatal to this event
	    case $? in
	    0)
		continue
		;;
	    3)
		SKIPBRKRES=1
		;;
            4)
                export SKIP_FORCED_VARYON=true
                ;;
	    *)
		exit 1
		;;
	    esac 
	fi
    done
    #
    :   There are volume groups associated with replicated
    :   resources that are going to be varied on later on.
    :   Set up a passive varyon for all these volume groups
    :   on all the nodes at this site.
    #
    if [[ -z $LOCALNODENAME ]]
    then
	export LOCALNODENAME=$(get_local_nodename)
    fi
    this_site_nodes=$(cllssite -c | tail +2 | cut -f2 -d: | grep -w $LOCALNODENAME)
    #
    :	Join the node names with commas.  IFS, "set --" and print must all
    :	run sequentially in the same subshell: the previous pipelined form
    :	"set -- ... | print" lost the positional parameters, because each
    :	pipeline stage runs in its own environment.
    #
    this_site_nodes=$(IFS=, ; set -- $this_site_nodes ; print "$*")          # comma separated
    for VG in $INACTIVE_VGS
    do
	if LC_ALL=C lsvg -L $VG 2>/dev/null | grep -i -q 'passive-only'
	then
	    #
	    :   Already passively varied on, so skip
	    #
	    continue
	else
	    if [[ -z $(odmget -q "name = CONCURRENT_VOLUME_GROUP and value = $VG" HACMPresource 2>/dev/null) ]]
	    then
		#
		:   If necessary, convert this volume group to ECM
		:   and passively vary on the volume group $VG on all
		:   nodes on this site
		#
		cl_on_node -cspoc "-n $this_site_nodes" cl_pvo -v $VG
	    fi
	fi
    done
fi

#
:   If Physical Volume IDs are given, figure out associated DISKs.
#
if [[ -n "$PVID_LIST" ]]
then
    # lspv output: "<hdisk name> <pvid> <vg> ..." -- field one is the disk
    lspv -L > /tmp/lspv.out.$$ 2> /tmp/lspv.err 
    for pvid in $PVID_LIST
    do
	#
	:   Map this PVID to its hdisk name, and collect it
	#
	hdisk=$(grep -w $pvid /tmp/lspv.out.$$ | cut -d' ' -f1)
	HDISK_LIST="$HDISK_LIST $hdisk"
    done

    #
    :	Pass along any lspv diagnostics
    #
    if [[ -s /tmp/lspv.err ]] 
    then
	print -u2 "stderr: lspv"
	cat /tmp/lspv.err >&2
    fi
fi

#
:   Take out any duplicate items in disk, volume group, and file_systems lists,
:   Then call the individual script to make disks available, varyon volume
:   groups, and mount filesystems.
#
if [[ -n $HDISK_LIST ]]
then
    #
    :	Remove any duplicates that may have crept in
    #
    HDISK_LIST=$(echo $HDISK_LIST | tr ' ' '\n' | sort -u)

    #
    :   If the 'sddsrv' daemon is running - vpath dead path detection and
    :   recovery - turn it off, since interactions with the fibre channel
    :   device driver will, in the case where there actually is a dead path,
    :   slow down every vpath operation.
    #
    if echo $HDISK_LIST | grep -q vpath 
    then
	#
	:   Each of the V, R, M and F fields are padded to fixed length,
	:   to allow reliable comparisons.  E.g., maximum VRMF is
	:   99.99.999.999
	#
	integer V R M F
	typeset -Z2 R                       # two digit release
	typeset -Z3 M                       # three digit modification
	typeset -Z3 F                       # three digit fix
	integer VRMF=0

	#
	:   Check to see if we are running an early level of SDD, which
	:   requires this.
	#
	sdd_level=106003000
	# In ksh93 the last stage of a pipeline runs in the current shell,
	# so this read sets V, R, M, F here (would not work in other shells)
        if lslpp -lcq "devices.sdd.*.rte" | cut -f3 -d':' | IFS=. read V R M F
        then
           VRMF=$V$R$M$F               # get the SDD level
        fi

	# SDD release 1.7 and later uses a higher minimum level
        if (( $R >= 07 )); then
            sdd_level=107002005
        fi

	#
	:   Check to see if SDD is active, and an early level
	#
	if (( $VRMF < $sdd_level )) &&
	    lssrc_out=$(LC_ALL=C lssrc -s sddsrv) 
	then
	    integer pid=0
	    # Parse the last line of the lssrc output; the last two
	    # whitespace-separated fields are taken as pid and state --
	    # presumably "sddsrv <group> <pid> active" when running;
	    # verify against the lssrc output format
	    print "$lssrc_out" | tail -1 | read subsys rest
	    (set -- $rest ; eval print \${$(($#-1))} \${$#}) | read pid state
	    if [[ $subsys == "sddsrv" && $state == "active" ]] &&
		(( $pid != 0 ))
	    then
		date                            # took how long to shut down SDD
		#
		: The stopsrc command does not include the -c flag for 2 reasons:
		: 1. The possible SIGKILL could result in "Invalid vpaths", and
		: 2. Time for the daemon to go inoperative could be several
		:    minutes in cases where many vpaths are not accessible
		#
		stopsrc -s sddsrv
		echo "$PROGNAME: Waiting for sddsrv to go inoperative. This could take several minutes when some vpaths are inaccessible.\n"
		#
		:   No need to clog the log file with this
		#
		set +x
		#
		#   Now wait for sddsrv to shut down
		#
                while [[ $subsys == "sddsrv" && $state != "inoperative" ]] ; do
                    sleep 1
                    if ! lssrc_out=$(LC_ALL=C lssrc -s sddsrv)
                    then
                        #
                        :   SRC stopped talking to us.  No longer wait for it
                        #
                        break
                    else
                        #
                        :   Pick up current state
                        #
			lssrc_out=$(LC_ALL=C lssrc -s sddsrv | tail -1)
			state=$(set -- $lssrc_out ; eval print \${$#})
                    fi
                done
		[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
		date                            # took how long to shut down SDD
		sddsrv_off=TRUE                 # Note that it was turned off
	    fi
	fi
    fi

    # 
    :	Break any reserverations, and make the disks available
    #
    if (( $SKIPBRKRES == 0 ))
    then
	if ! cl_disk_available "$HDISK_LIST"
	then
	    STATUS=1
	fi
    fi
fi

#
:   Call replicated resource prevg-online method associated with any
:   replicated resource that is a member of the resource group
:   we are currently processing. Note that a return code of 3 from
:   the prevg-online method indicates the default action should not
:   happen. The default action for the online_primary case is to 
:   varyon the VG. The default action for the online_secondary
:   case is to NOT varyon the VG
#
if [[ -n $INACTIVE_VGS && -n $(odmget HACMPrresmethods) || -n $HACMPREPRESOURCE_ODM ]]
then
    #
    :   clsetrepenv utility sets up the environment for replicated methods.
    #
    set -a
    eval $(clsetrepenv $GROUPNAME)
    set +a
    METHODS=$(cl_rrmethods2call prevg_online)

    for method in $METHODS
    do
	if [[ -x $method ]]
	then
	    $method $INACTIVE_VGS
	     
	    case $? in
	    0)
		continue
		;;
	    3)
		# Method asked to override the default varyon action;
		# consumed by the SKIPVARYON computation below
		DEF_VARYON_ACTION=1
		;;
	    *)
		# Any other non-zero return is fatal to this event
		exit 1
		;;
	    esac 
	fi
    done
fi

if [[ $PRINCIPAL_ACTION == "ACQUIRE" ]]
then
    #
    :	online_primary: the default action is to do the varyon, so an
    :	override from the RR method means we skip it
    #
    if (( $DEF_VARYON_ACTION == 1 ))
    then
	SKIPVARYON=1
    else
	SKIPVARYON=0
    fi
else
    #
    :	online_secondary: the default action is to skip the varyon, so
    :	an override from the RR method means we do it
    #
    if (( $DEF_VARYON_ACTION == 1 ))
    then
	SKIPVARYON=0
    else
	SKIPVARYON=1
    fi
fi

if [[ -n $INACTIVE_VGS ]]
then
    #
    :	De-duplicate the list of volume groups before varying them on
    #
    INACTIVE_VGS=$(echo $INACTIVE_VGS | tr ' ' '\n' | sort -u)

    #
    :	Vary on the volume groups, making any ODM updates necessary,
    :	unless a replicated resource method told us to skip the varyon.
    :	A varyon failure is fatal to this event.
    #
    if (( $SKIPVARYON == 0 ))
    then
	cl_activate_vgs -n "$INACTIVE_VGS" || exit 1
    fi
fi

if [[ -n $OEM_INACTIVE_VGS ]]
then
    #
    :	De-duplicate the list of OEM volume groups to activate
    #
    OEM_INACTIVE_VGS=$(echo $OEM_INACTIVE_VGS | tr ' ' '\n' | sort -u)

    #
    :	Activate the OEM Volume Groups; record failure but keep going
    #
    cl_activate_vgs -n -c "$OEM_INACTIVE_VGS" || STATUS=1
fi

#
:   Call replicated resource postvg-online method associated
:   with any replicated resource defined in the resource group
:   we are currently processing.  
#
if [[ -n $INACTIVE_VGS && -n $(odmget HACMPrresmethods) || -n $HACMPREPRESOURCE_ODM ]]
then
    #
    :   clsetrepenv utility sets up the environment for replicated methods.
    #
    set -a
    eval $(clsetrepenv $GROUPNAME)
    set +a
    METHODS=$(cl_rrmethods2call postvg_online)

    for method in $METHODS
    do
	if [[ -x $method ]]
	then
	    if ! $method $INACTIVE_VGS
	    then
		# Distinct exit code for a postvg_online method failure --
		# presumably recognized by the event caller; verify
		exit 7
	    fi
	fi
    done
fi

#
:   If there are any udresources to be processed after volumegroup, process those.
#
acquire_udresources AFTER_VOLUME_GROUP
RC=$?
: exit status of acquire_udresources is: $RC
if (( $RC != 0 ))
then
    echo "Failed to Start userdefined resources '${GROUPNAME}' "
    if (( $STATUS == 0 ))
    then
	STATUS=1
    fi
fi

#
:  If ALL filesystems are specified, determine them from the volume groups
#
if [[ $FILE_SYSTEMS == "ALL" && -n $VOLUME_GROUPS ]]
then
    #
    :	save the mount info
    #
    # Mount points of the currently mounted jfs/jfs2 file systems
    mount 2>/tmp/mount.err | awk '$3 ~ /jfs2*$/ {print $2}' > /tmp/mount1.out.$$ 

    #
    :	Retrieve the file systems and their logical volume names from the
    :	given volume groups.
    #
    date
    ALL_LVs=""
    for VG in $VOLUME_GROUPS; do
	# CuDep dependency entries for a volume group name its logical volumes
	OPEN_LVs=$(clodmget -q "name = $VG" -f dependency -n CuDep)
	ALL_LVs="$ALL_LVs $OPEN_LVs"
    done
    if lslpp -l 'hageo.*' >/dev/null 2>&1 ||
       lslpp -l 'geoRM.*' >/dev/null 2>&1 
       then
	#
    	:   HAGEO is installed.  It uses resource groups containing volume
	:   groups, but for which its inappropriate for HACMP to mount the
	:   file systems.  Leave out from the list of file systems those
	:   which may a logical volume also handled by HAGEO.
	#
	for LV in $ALL_LVs; do
            fs=$(get_mntpt_for_lv ${LV})
            if [[ -n ${fs} ]]
	    then
	        if grep -qx $fs /tmp/mount1.out.$$
	        then
		    #
		    :	This one is already mounted, skip it
		    #
		    continue;
	        else
		    #
		    :	Now, check to see if Geo is using this one.  That is, is the
		    :	raw logical volume name for the file system a Geo
		    :	local_device.
		    #
		    if [[ -z $(odmget -q "attribute = local_device and value = /dev/r${LV}" CuAt) ]] 
		    then
			#
		        :   Only do the ones not used by Geo.
			#
		        UNMOUNTED_FS="$UNMOUNTED_FS $fs"
		    else
			#
		        :   Lets just make real sure that this logical volume is
		        :   used by Geo - get the name for that last entry, and
		        :   look up what kind of device it is in CuDv
			#
		        gmd=$(clodmget -q "attribute = local_device and value = /dev/r${LV}" -f name -n CuAt)
		        PdDvLn=$(clodmget -q "name = $gmd" -f PdDvLn -n CuDv)
		        if [[ $PdDvLn != "geo_mirror/gmd/lgmd" ]] 
			then
			    #
			    :   Whaterver it is, its not a GMD, so we can go with it
			    #
			    UNMOUNTED_FS="$UNMOUNTED_FS $fs"
			fi
		    fi
		fi
	    fi
	done
    else 
	#
    	:   HAGEO is not installed.  Pick up all the file systems.
	#
	for LV in $ALL_LVs; do
            fs=$(get_mntpt_for_lv ${LV})
            if [[ -n ${fs} ]]
	    then
	        if grep -qx $fs /tmp/mount1.out.$$
	        then
		    #
		    :	This one is already mounted, skip it
		    #
		    continue
	        else
		    UNMOUNTED_FS="$UNMOUNTED_FS $fs"
		fi
	    fi
	done
    fi
    # The bracketing date calls let the log show how long this scan took
    date

    if [[ -s /tmp/mount.err ]]
    then
	#
	:   Print any error messages from mount
	#
    	print -u2 "stderr: mount"
	cat /tmp/mount.err >&2
    fi
fi

#
:   Mount those file systems needed for this resource group
#
if [[ -n "$UNMOUNTED_FS" ]]
then

    #
    :	Remove any duplicates from the list of filesystems to mount
    #
    UNMOUNTED_FS=$(echo $UNMOUNTED_FS | tr ' ' '\n' | sort -u)

    #
    :	Find all the JFS and JFS2 log devices - even if the volume group was
    :	already varyd on - and run logredo against each one to help to assure
    :	consistency of filesystems such that they will mount successfully.
    #
    VOLUME_GROUPS=$(echo $VOLUME_GROUPS | tr ' ' '\n' | sort -u)
    date
    logdevs=""
    ALL_LVs=""
    #
    :	odmget search pattern for both jfslog and jfs2log
    #
    pattern='jfs*log'
    for VG in $VOLUME_GROUPS
    do
	# CuDep dependency entries name the logical volumes of this VG
	LVs=$(clodmget -q "name = $VG" -f dependency -n CuDep)
	ALL_LVs="${ALL_LVs} ${LVs}"
    done
    # Collect the LVs whose ODM type matches jfslog/jfs2log
    for LV in $ALL_LVs
    do
	if [[ -n $(odmget -q "name = ${LV} and \
	    attribute = type and \
	    value like ${pattern}" CuAt) ]]
	then
	    logdevs="${logdevs} /dev/${LV}"
	fi
    done
    HAVE_GEO=""
    if lslpp -l 'hageo.*' >/dev/null 2>&1 ||
       lslpp -l 'geoRM.*' >/dev/null 2>&1 
       then
	# 
       	:    HAGEO tends to create file systems where the log files are of
	:    type jfs or jfs2.  Catch all these by looking them up in
	:    /etc/filesystems
	# 
	HAVE_GEO="true"
	for LV in $ALL_LVs
	do
	    if [[ -n $(odmget -q "name = ${LV} and \
		attribute = type and \
		value like jfs*" CuAt) ]]
	    then
	        if grep -w /dev/${LV} /etc/filesystems | grep -qw log 
		then
		    logdevs="$logdevs /dev/${LV}"
		fi
	    fi
	done
	#
	:    Remove any duplicates acquired so far
	#
	logdevs=$(echo $logdevs | tr ' ' '\n' | sort -u)
    fi

    #
    :	JFS2 file systems can have in-line logs.  In this case, the log device
    :	is the same as the file system device.  
    #
    for LV in $ALL_LVs
    do
	if [[ -n $(odmget -q"name = ${LV} and \
	    attribute = type and \
	    value = jfs2" CuAt) ]]
	then
            fs=$(get_mntpt_for_lv ${LV})
            if [[ -n ${fs} ]]
	    then
		# grep -p (AIX) prints the whole /etc/filesystems stanza for
		# this device; the awk picks the value of its log attribute
		LOG=$(grep -w -p /dev/${LV} /etc/filesystems | awk '$1 ~ /log/ {printf $3}')
		if [[ $LOG == "INLINE" || $LOG == "/dev/${LV}" ]]
		then
	    	    logdevs="$logdevs /dev/${LV}"
		fi
	    fi
	fi
    done
    date

    #
    :	Run logredos in parallel to save time.
    #
    for dev in $logdevs
    do
    	if [[ -n $HAVE_GEO ]]
	then 
	    #
	    :	HAGEO or GeoRM is installed.  If Geo is mirroring the file
	    :	system log, we have to run logredo on the GMD, so that updates
	    :	get reflected to the remote site.
	    #
	    gmd=$(clodmget -q "attribute = local_device and value = /dev/r${dev##*/}" -f name -n CuAt)
	    if [[ -n $gmd ]] 
	    then
	    	dev="/dev/"$gmd
	    fi
	fi
	logredo $dev &
    done
    wait    #	for all the logredos to complete

    #
    :	Mount the file systems
    #
    if ! cl_activate_fs "$UNMOUNTED_FS"
    then
	STATUS=1
    fi
fi

#
:   Activate OEM filesystems
#
if [[ -n $ALL_OEM_VOLUME_GROUPS ]]
then
    if [[ $OEM_FILE_SYSTEMS == "ALL" ]]
    then
	#
	:   Check for "Mount All Filesystems"
	#
        OEM_FILE_SYSTEMS=""
        for OEM_VG in $ALL_OEM_VOLUME_GROUPS
        do
	    #
            :	Get the filesytems from the VG
	    #
	    # Note: the success test here was previously inverted
	    # ("if ! OEM_FS=..."), which collected filesystems only when
	    # the lookup FAILED and flagged an error when it succeeded
            if OEM_FS=$(cl_get_oem_filesystems -v $OEM_VG)
            then
                OEM_FILE_SYSTEMS="$OEM_FILE_SYSTEMS $OEM_FS" # Create a list of fs to work with
            else
                echo "ERROR: Unable to get filesystems for $OEM_VG"
                STATUS=1
            fi
        done
    fi
fi

#
:   Remove any duplicates 
#
OEM_FILE_SYSTEMS=$(echo $OEM_FILE_SYSTEMS | tr ' ' '\n' | sort -u)

#
:   Now filter-out the filesystems that are already mounted
#
if [[ -n $OEM_FILE_SYSTEMS ]]
then
    # Second field of mount output is the mount point
    mount 2>/tmp/oemmount.err | awk '{print $2}' > /tmp/oemmount1.out.$$ 
    for fs in $OEM_FILE_SYSTEMS 
    do
	#
	:   Collect only those filesystems not already mounted
	#
	if ! grep -qx "$fs" /tmp/oemmount1.out.$$
	then
	    OEM_UNMOUNTED_FS="$OEM_UNMOUNTED_FS $fs"
	fi
    done
fi

#
:   Print any errors from mount command
#
if [[ -s /tmp/oemmount.err ]]  
then
    print -u2 "stderr: mount"
    cat /tmp/oemmount.err >&2
fi

#
:   Mount the OEM filesystems; record failure but keep going
#
if [[ -n $OEM_UNMOUNTED_FS ]]
then
    cl_activate_fs -c "$OEM_UNMOUNTED_FS" || STATUS=1
fi

#
:   Sync the active volume groups after the filesystems are mounted.
#
lsvg -L -o 2>/tmp/lsvg.err | sort > /tmp/lsvg.out.$$ 
#
:   comm -12 selects those INACTIVE_VGS that are now varied on; both
:   comm inputs must be sorted
#
for one_vg in $(echo $INACTIVE_VGS | tr ' ' '\n' | sort | comm -12 /tmp/lsvg.out.$$ - )
do
    cl_sync_vgs $one_vg &	# syncs run in parallel and are not waited on
done

#
:   If sddsrv was turned off above, turn it back on again
#
if [[ $sddsrv_off == TRUE ]] &&
    ! LC_ALL=C lssrc -s sddsrv | grep -iqw active 
    then
    startsrc -s sddsrv
fi

#
:   Print any errors from the lsvg command
#
if [[ -s /tmp/lsvg.err ]]
then
    print -u2 "stderr: lsvg -o"
    cat /tmp/lsvg.err >&2
fi

#
:   Cleanup temp files on success, or preserve them for problem
:   determination on failure.  Note: the mount error file is
:   "/tmp/oemmount.err" - the previous relative path "tmp/oemmount.err"
:   never matched it, so it was neither removed nor preserved.
#
if [[ $STATUS == 0 ]]
then
    rm -f /tmp/mount1.out.$$ /tmp/lsvg.out.$$ /tmp/lspv.out.$$ /tmp/oemmount1.out.$$ /tmp/oemmount.err
else
    cp /tmp/mount1.out.$$ /tmp/lsvg.out.$$ /tmp/lspv.out.$$ /tmp/oemmount1.out.$$ /tmp/oemmount.err /var/hacmp/log
fi

exit $STATUS