#!/bin/ksh93
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/cspoc/utilities/climportvg.sh 1.19.1.8 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1998,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)22  1.19.1.8  src/43haes/usr/sbin/cluster/cspoc/utilities/climportvg.sh, hacmp.cspoc, 61haes_r720, 1540B_hacmp720 9/30/15 12:38:08
###############################################################################
#
# Name:
#       climportvg.sh
#
# Description:
#    Performs a full import of a volume group.
#
#   Usage: climportvg [-V MajorNumber] -y VolumeGroup
#                     [-f] [-c] [-x] [-Q] PhysicalVolume
#
# Return Values:
#       0       Successful import
#       1       Error condition
#
###############################################################################


# Including file containing SCSIPR functions
. /usr/es/sbin/cluster/events/utils/cl_scsipr_event_functions

# Print the command synopsis (catalog set 2, message 39) and terminate
# with a failure status.  Called for any command line parsing error.
usage()
{
    dspmsg -s 2 cspoc.cat 39 "Usage: climportvg [-V MajorNumber] -y VolumeGroup [-f] [-c] [-x] [-Q] PhysicalVolume\n"
    exit 1
}


###############################################################################
#
# Name:	    vg_fence
#
# Function: If appropriate, re-initialize the fence height for volume group $VG
#	    to 'read only'.  Fencing is set up only if 
#	    1) Cluster services are active, and
#	    2) The volume group is in a resource group
#	    Otherwise, fencing is not established, to leave the volume group
#	    free for other administrative operations.
#
# Input:    Volume group name in global variable $VG
#
###############################################################################
function vg_fence
{
    [[ $VERBOSE_LOGGING == "high" ]]  && set -x

    integer RC=0

    #
    :	First, check to see if $VG is in a resource group we know about.
    :	If not, do not fence it - leave the volume group free for other
    :	administrative operations.
    #
    if [[ -z $(clodmget -q "value = $VG and name like '*VOLUME_GROUP'" -f group -n HACMPresource) ]]
    then
	return 0
    fi

    #
    :	Find out whether cluster services are active on this node
    #
    if  LC_ALL=C lssrc -ls clstrmgrES 2>&1 | grep "Current state:" | egrep -q -v "ST_INIT|NOT_CONFIGURED"
    then
	#
	:   Cluster services are active, and PowerHA manages $VG, so
	:   set up the fence group for the disks in $VG, and initialize 
	:   the fence height to 'read/only'
	#
	cl_vg_fence_redo -c $VG ro 
	RC=$?
	:   exit status of cl_vg_fence_redo $VG is $RC
	if (( 0 != $RC ))
	then
	    #
	    :   Log any error, but continue.  If this is a real problem,
	    :   manual intervention may be needed.
	    #
	    ro=$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)
	    # Use positional placeholders in the default string so the
	    # catalog arguments are actually consumed; the previous default
	    # expanded the variables inline, ignored the three arguments
	    # passed, and had no trailing newline.
	    dspmsg -s 38 cspoc.cat 10511 '%1$s: Volume group %2$s fence height could not be set to %3$s\n' $PROGNAME $VG "$ro"
	fi
    fi
    return $RC
}


###############################################################################
#
# Main Starts Here
#
###############################################################################

#
:   With verbose logging requested, trace execution
#
if [[ $VERBOSE_LOGGING == "high" ]] ; then
    set -x
    version='1.19.1.8'
fi

PROGNAME=${0##*/}			# basename of this script
PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
HA_DIR="$(cl_get_path)"

#
:   Validate the argument count before any parsing
#
if (( $# < 2 )) || (( $# > 9 ))
then
    usage
fi

#
:   If LVM supports varyon state tracking, arrange to override it
:   with the -O flag on the later varyonvg
#
O_flag=''
[[ -n $(odmget -q "attribute = varyon_state" PdAt) ]] && O_flag='-O'

#
:   Parse the command line arguments and turn them into internal flags
#
typeset VG MAJOR CFLAG QFLAG SW 

while getopts ":y:V:fcCxQ" option
do
    case $option in
	y)  VG=$OPTARG ;;		# volume group name
	V)  MAJOR="-V $OPTARG" ;;	# major number
	c)  CFLAG='-c' ;;		# concurrent mode
	Q)  QFLAG='-Q' ;;		# with quorum on
	\?) usage ;;			# invalid option flag
	*)  SW="$SW -${option}" ;;	# pass through other importvg flags
    esac
done

shift $((OPTIND - 1))			# discard the parsed options

#
:   The PVID should be the only thing left on the command line.  Get the
:   corresponding hdisk name.  Note that in the case of vpath disks, there may
:   be many, so only pick up the first
#
PVID=$1
lspv | grep -w $PVID | read DISK rest

if [[ -z $DISK ]] ; then
    #
    :	The given PVID is not known on this node
    #
    dspmsg -s 2 cspoc.cat 40 "Physical volume %s does not exist!\n" $PVID
    exit 1
fi

#
:   If the disk is not in the "available" state, or not readable
#
if [[ -z $(LC_ALL=C lsdev -Cc disk -l $DISK -S A -F 'status') ]] ||
   ! cl_querypv /dev/$DISK ; then
    #
    :	This may be due to ghost disks getting in the way.  See if we can get
    :	rid of them.  OTOH, if the volume group is varied on with reserves on
    :	another node, they are not going to appreciate what happens next.
    #
    cl_disk_available -s -v $DISK $([[ -n $VG ]] && cl_fs2disk -pg $VG 2>/dev/null)

    #
    :	Now, try that again
    #
    if [[ -z $(LC_ALL=C lsdev -Cc disk -l $DISK -S A -F 'status') ]] ||
       ! cl_querypv /dev/$DISK ; then
	#
	:   Still no go.  No further recovery.
	#
	# Fix: this message previously referenced undefined $ID; report
	# $PVID through a %s placeholder, matching the failure path above.
	dspmsg -s 2 cspoc.cat 40 "Physical volume %s does not exist!\n" $PVID
	exit 1
    fi
fi

#
:   Now, determine what type of import needs to take place here, based on
:   the varyon state of the volume group on this node
#
STATE="$(clresactive -v $VG)"

case $STATE in

    "no" )
	#
	:   Volume group is not known on this node
	#
	if [[ -n "$CFLAG" ]]
	then
	    IMP_SW="-F $CFLAG"		# Import in concurrent mode
	else
	    IMP_SW="-F -n"		# Import w/o varyon
	fi

	if [[ -n "$QFLAG" ]]
	then
	    QUORUM="y"
	else
	    QUORUM="n"
	fi

	#
	:   Clean up any old fence group
	#
	cl_vg_fence_term -c $VG
	RC=$?
	:   exit status of cl_vg_fence_term $VG is $RC

	#
	:   Hence, we need to perform a full import of this volume group
	#
	importvg -y $VG $MAJOR $SW $IMP_SW -R $DISK || {
	    RC=$?
	    :	exit status of importvg is $RC
	    dspmsg -s 2 cspoc.cat 41 "importvg -y %s %s failed!\n" "$VG $MAJOR $SW $IMP_SW" $DISK
	    exit 1
	}

	#
	:   The import worked.  The volume group has to be varied on to be
	:   able to set other attributes.
	#
	if varyonvg $O_flag -n -u $VG		# nosync, unreserved
        then
            chvg -a'n' $VG			# turn off autovaryon
	    :	exit status of chvg is $?
            chvg -Q $QUORUM $VG		        # specified quorum flag
	    :	exit status of chvg is $?
            varyoffvg $VG			# release volume group
	    :	exit status of varyoffvg is $?
        fi

	#   Output the information into STDERR so that this gets logged into cspoc.log file in cl_importvg
	dspmsg -s 38 cspoc.cat 7 'Volume group %1$s has been imported.\n' $VG >&2

	#
	:   Set up SCSIPR fencing on the imported Volume Group.
	#
	if LC_ALL=C lssrc -ls clstrmgrES 2>&1 | grep "Current state:" | egrep -q -v "ST_INIT|NOT_CONFIGURED"
	then

	    #
	    :   Cluster services are active. 
	    #
	    typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge)
	    if [[ $SCSIPR_ENABLED == Yes ]]
	    then
		#
		:   Register and reserve the volume group, $VG using SCSIPR.
		#
		if ! cl_scsipr_dare_Reg_Res $VG
		then
		    #
		    :  Registering the Volume Group failed.
		    :  Hence importvg will fail.
		    #
		    exit 1
		fi
	    fi
	fi

	#
	:   Set up the fence group for the disks in $VG
	:   and initialize the fence height to 'read/only'
	#
	vg_fence
	RC=$?
	:   return status of vg_fence is $RC

	exit 0
    ;;

    "inactive" )
	#
	:   Clean up any old fence group
	#
	cl_vg_fence_term -c $VG
	RC=$?
	:   exit status of cl_vg_fence_term $VG is $RC
	#
	:   The volume group is known, but not currently active. 
	:   Perform an update.
	#
	importvg -L $VG -R $DISK || {
	    :	exit status of importvg is $?
	    dspmsg -s 2 cspoc.cat 42 "importvg -L %s failed!\n" "$VG $DISK"
	    exit 1
	}

	#   Output the information into STDERR so that this gets logged into cspoc.log file in cl_importvg
	dspmsg -s 38 cspoc.cat 8 'Volume group %1$s has been updated.\n' $VG >&2
	
	#
	:   Set up SCSIPR fencing on the imported Volume Group.
	#
	if LC_ALL=C lssrc -ls clstrmgrES 2>&1 | grep "Current state:" | egrep -q -v "ST_INIT|NOT_CONFIGURED"
	then
	    #
	    :   Cluster services are active.
	    #
	    typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge)
	    if [[ $SCSIPR_ENABLED == Yes ]]
	    then
		#
		:   Register and reserve the volume group, $VG using SCSIPR.
		#
		if ! cl_scsipr_dare_Reg_Res $VG
		then
		    #
		    :  Registering the Volume Group failed.
		    :  Hence importvg will fail.
		    #
		    exit 1
		fi
	    fi
	fi

	#
	:   Set up the fence group for the disks in $VG
	:   and initialize the fence height to 'read/only'
	#
	vg_fence
	RC=$?
	:   return status of vg_fence is $RC

	exit 0
    ;;

    "active" )
	#
	:   The volume group is actually varied on this node right now.  The best
	:   that can be done is update LVMs information from the platter.  The
	:   limitation on this is that information known by the LVM device driver
	:   is not necessarily updated.
	#
	synclvodm -R -L $DISK $VG || {
	    :	exit status of synclvodm is $?
	    exit 1
	}
	dspmsg -s 38 cspoc.cat 8 'Volume group %1$s has been updated.\n' $VG >&2
	exit 0
    ;;

    * )
	#
	:   The only other possibilities are that this volume group is varied on
	:   in concurrent or passive mode.  In either case, an update is unneeded.
	#
	# Fix: this pattern was previously quoted as "*", which matches only a
	# literal asterisk, so this default branch could never be taken.
	dspmsg -s 38 cspoc.cat 9 'Volume group %1$s is up-to-date. No action taken.\n' $VG >&2
	exit 0

    ;;

esac
