#!/bin/ksh

export try_out try_err cspoc_tmp_log
export FPATH=/usr/es/sbin/cluster/cspoc

cspoc_tmp_log=/var/hacmp/log/cel$$_tmplog
log_cmd $cspoc_tmp_log $0 $*

trap 'cexit $cspoc_tmp_log $?' EXIT
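#
# The cel_fN functions below appear to be generated from the plan's
# %try_parallel / %try_serial stanzas.  Each follows the same basic shape:
# run a command on one or more nodes through cdsh, recover each node's exit
# status with get_rc, and report failures through nls_msg.  In outline
# (names other than cdsh, get_rc and nls_msg are illustrative):
#
#   cel_sN=/tmp/cel$$_sN                  # per-step capture file prefix
#   cdsh $cel_sN $node -q <command>       # run <command> on $node, capture output
#   cel_rc=$(get_rc ${cel_sN} $node)      # recover that node's exit status
#
# The EXIT trap set in each function makes log_output append the captured
# output to the C-SPOC temporary log even if the step is interrupted.
#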
function cel_f1
{
    cel_s1=/tmp/cel$$_s1
    try_err=${cel_s1}.err
    try_out=${cel_s1}.out
    trap "log_output $cspoc_tmp_log ${cel_s1} 	    eval $E_LSPV_CMD" EXIT
    IFS=,$IFS
    for node in $_REFNODE; do
	cdsh $cel_s1 $node -q 	    eval $E_LSPV_CMD
	cel_rc=$(get_rc ${cel_s1} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		TRY_RC=$cel_rc
		    		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 24 "${_CMD}: Error executing lspv on node $node.\n" ${_CMD} $node 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f2
{
    cel_s2=/tmp/cel$$_s2
    try_err=${cel_s2}.err
    try_out=${cel_s2}.out
    trap "log_output $cspoc_tmp_log ${cel_s2} 	eval $E_LSPV_CMD" EXIT
    IFS=,$IFS
    for node in $_NODE; do
	cdsh $cel_s2 $node -q 	eval $E_LSPV_CMD
	cel_rc=$(get_rc ${cel_s2} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	    TRY_RC=$((TRY_RC+cel_rc))
		    	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 24 "${_CMD}: Error executing lspv on node $node.\n" ${_CMD} $node 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f3
{
    cel_s3=/tmp/cel$$_s3
    try_err=${cel_s3}.err
    try_out=${cel_s3}.out
    trap "log_output $cspoc_tmp_log ${cel_s3} 	/usr/es/sbin/cluster/utilities/clgetactivenodes -n $E_ACTIVE_NODE" EXIT
    IFS=,$IFS
    for node in $_ACTIVE_NODE; do
	cdsh $cel_s3 $node -q 	/usr/es/sbin/cluster/utilities/clgetactivenodes -n $E_ACTIVE_NODE
	cel_rc=$(get_rc ${cel_s3} $node)
	case $cel_rc in
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f4
{
    cel_s4=/tmp/cel$$_s4
    try_err=${cel_s4}.err
    try_out=${cel_s4}.out
    trap "log_output $cspoc_tmp_log ${cel_s4} 	    clgetvg $option $parameter" EXIT
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cdsh $cel_s4 $node -q 	    clgetvg $option $parameter
	cel_rc=$(get_rc ${cel_s4} $node)
	case $cel_rc in
	    0)
				#
				:   Stop on the first node that reports the owning volume group.
				:   Note that this is just the first one that knows about the
				:   volume group - it appears in the local ODM.  Actual volume
				:   group state is determined below.
				#
				TRY_RC=0
				IFS=${IFS#,}
		IFS=${IFS#,}
		return
		;;
	    *)
		if [ $cel_rc != 0 ]; then
		                    # 
		                    :   The C-SPOC communications mechanism does not provide a
		                    :   convenient indication of the difference between being
		                    :   unable to reach a remote node, and a failure of a command
		                    :   run on that remote node.  Attempt to distinguish that here
		                    :   by looking for an error message from that node.
		                    #                                                 
		    		if [[ -f $try_err ]] &&
		    		    ! grep -q "^${node}: " $try_err # ignore any 'not found' msg
		    		then
		    		    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 1 "${_CMD}: Can\'t reach $node, continuing anyway\n" ${_CMD} $node 
		    		fi
		    		TRY_RC=$cel_rc
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f5
{
    cel_s5=/tmp/cel$$_s5
    try_err=${cel_s5}.err
    try_out=${cel_s5}.out
    trap "log_output $cspoc_tmp_log ${cel_s5} 	clresactive -v $VG" EXIT
    cdsh $cel_s5 $_TARGET_NODES -q 	clresactive -v $VG
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cel_rc=$(get_rc ${cel_s5} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	   nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 1 "${_CMD}: can't reach $node, continuing anyway\n"  ${_CMD} $node  
		    	   cel_rc=0
		    	   TRY_RC=$((TRY_RC+cel_rc))
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f6
{
    cel_s6=/tmp/cel$$_s6
    try_err=${cel_s6}.err
    try_out=${cel_s6}.out
    trap "log_output $cspoc_tmp_log ${cel_s6} 	    clvaryonvg $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s6 $node -q1 	    clvaryonvg $VG
	cel_rc=$(get_rc ${cel_s6} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		                    TRY_RC=$cel_rc
		                    nls_msg -2 -l $cspoc_tmp_log 24 7 "error executing clvaryonvg $DVG on node $node\n" $DVG $node 
		                    if [[ -s $try_out || -s $try_err ]]
		                    then
		                        #
		                        :   If stdout or stderr was captured for this failure, show
		                        :   the information to the user
		                        #
		                        nls_msg -2 -l $cspoc_tmp_log 6 7 "Error detail:"
		                        cat -q $try_out $try_err >&2
		                    fi
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f7
{
    cel_s7=/tmp/cel$$_s7
    try_err=${cel_s7}.err
    try_out=${cel_s7}.out
    trap "log_output $cspoc_tmp_log ${cel_s7} 	    cl_pvo -v $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s7 $node -q1 	    cl_pvo -v $VG
	cel_rc=$(get_rc ${cel_s7} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		#
		    		:   Log any error, but continue.
		    		#
		                    TRY_RC=$cel_rc
		                    nls_msg -2 -l $cspoc_tmp_log 24 7 "error executing clvaryonvg $DVG on node $node\n" $DVG $node 
		                    if [[ -s $try_out || -s $try_err ]]
		                    then
		                        #
		                        :   If stdout or stderr was captured for this failure, show
		                        :   the information to the user
		                        #
		                        nls_msg -2 -l $cspoc_tmp_log 6 7 "Error detail:"
		                        cat -q $try_out $try_err >&2
		                    fi
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f8
{
    cel_s8=/tmp/cel$$_s8
    try_err=${cel_s8}.err
    try_out=${cel_s8}.out
    trap "log_output $cspoc_tmp_log ${cel_s8} 		    varyonvg -n -b -u $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s8 $node 		    varyonvg -n -b -u $VG
	cel_rc=$(get_rc ${cel_s8} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log 49 26 "Error unlocking volume group %s\n" "$DVG"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f9
{
    cel_s9=/tmp/cel$$_s9
    try_err=${cel_s9}.err
    try_out=${cel_s9}.out
    trap "log_output $cspoc_tmp_log ${cel_s9}                     varyoffvg $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s9 $node                     varyoffvg $VG
	cel_rc=$(get_rc ${cel_s9} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		                            TRY_RC=$((TRY_RC+cel_rc))
		                            nls_msg -2 -l $cspoc_tmp_log 49 26 "Error unlocking volume group %s\n" 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f10
{
    cel_s10=/tmp/cel$$_s10
    try_err=${cel_s10}.err
    try_out=${cel_s10}.out
    trap "log_output $cspoc_tmp_log ${cel_s10} 		eval $e_update_cmd" EXIT
    IFS=,$IFS
    for node in $NODE_LIST; do
	cdsh $cel_s10 $node -q 		eval $e_update_cmd
	cel_rc=$(get_rc ${cel_s10} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		    TRY_RC=$((TRY_RC+cel_rc))
		    		    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 16 "${_CMD}: Error executing clupdatevg $DVG $_IMPORT_PVID on node $node\n" ${_CMD} $DVG $_IMPORT_PVID $node 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f11
{
    cel_s11=/tmp/cel$$_s11
    try_err=${cel_s11}.err
    try_out=${cel_s11}.out
    trap "log_output $cspoc_tmp_log ${cel_s11} 		    varyoffvg $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s11 $node 		    varyoffvg $VG
	cel_rc=$(get_rc ${cel_s11} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 7 "${_CMD}: Error executing varyoffvg $DVG on node $node\n" ${_CMD} $DVG $node 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f12
{
    cel_s12=/tmp/cel$$_s12
    try_err=${cel_s12}.err
    try_out=${cel_s12}.out
    trap "log_output $cspoc_tmp_log ${cel_s12} 			cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s12 $node -q1 			cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s12} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			    #
		    			    :   Log any error, but continue.
		    			    #
		    			    nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f13
{
    cel_s13=/tmp/cel$$_s13
    try_err=${cel_s13}.err
    try_out=${cel_s13}.out
    trap "log_output $cspoc_tmp_log ${cel_s13} 		    varyonvg -c -P $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s13 $node 		    varyonvg -c -P $VG
	cel_rc=$(get_rc ${cel_s13} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 56 "${_CMD}: Error executing varyonvg -c -P $DVG on node $node\n" $_CMD $DVG 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f14
{
    cel_s14=/tmp/cel$$_s14
    try_err=${cel_s14}.err
    try_out=${cel_s14}.out
    trap "log_output $cspoc_tmp_log ${cel_s14} 		    cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s14 $node -q1 		    cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s14} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			#
		    			:   Log any error, but continue.
		    			#
		    			nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f15
{
    cel_s15=/tmp/cel$$_s15
    try_err=${cel_s15}.err
    try_out=${cel_s15}.out
    trap "log_output $cspoc_tmp_log ${cel_s15} 		cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s15 $node -q1 		cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s15} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		    #
		    		    :   Log any error, but continue.  If this is a real problem, the varyonvg will fail
		    		    #
		    		    nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f16
{
    cel_s16=/tmp/cel$$_s16
    try_err=${cel_s16}.err
    try_out=${cel_s16}.out
    trap "log_output $cspoc_tmp_log ${cel_s16} 		    varyonvg -n $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s16 $node 		    varyonvg -n $VG
	cel_rc=$(get_rc ${cel_s16} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log 49 29 "Error re-locking volume group %s\n" "$DVG"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f17
{
    cel_s17=/tmp/cel$$_s17
    try_err=${cel_s17}.err
    try_out=${cel_s17}.out
    trap "log_output $cspoc_tmp_log ${cel_s17} 	    exportvg $VG" EXIT
    IFS=,$IFS
    for node in $NODE_LIST; do
	cdsh $cel_s17 $node -q 	    exportvg $VG
	cel_rc=$(get_rc ${cel_s17} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		nls_msg -2 -l $cspoc_tmp_log 37 3 "${_CMD}: Could not export volume group $DVG\n" ${_CMD} ${DVG} 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f18
{
    cel_s18=/tmp/cel$$_s18
    try_err=${cel_s18}.err
    try_out=${cel_s18}.out
    trap "log_output $cspoc_tmp_log ${cel_s18}     eval $E_CHK_CMD" EXIT
    IFS=,$IFS
    for node in $_CLUSTER_NODES; do
	cdsh $cel_s18 $node -q1     eval $E_CHK_CMD
	cel_rc=$(get_rc ${cel_s18} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	nls_msg -l $cspoc_tmp_log ${_MSET} 41 \
		    	"${_CMD}: error trying to run lspv on node $node\n" ${_CMD} $node
		    	exit 1
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f19
{
    cel_s19=/tmp/cel$$_s19
    try_err=${cel_s19}.err
    try_out=${cel_s19}.out
    trap "log_output $cspoc_tmp_log ${cel_s19} 	odmget -q $E_QUAL $E_CUAT" EXIT
    cdsh $cel_s19 $_CLUSTER_NODES -q 	odmget -q $E_QUAL $E_CUAT
    IFS=,$IFS
    for node in $_CLUSTER_NODES; do
	cel_rc=$(get_rc ${cel_s19} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	    TRY_RC=$(( TRY_RC + cel_rc ))
		    	    nls_msg -l ${cspoc_tmp_log} ${_MSET} 45 \
		    	    "${_CMD}: Unable to obtain logical volume names from cluster node $node\n" ${_CMD} $node
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f20
{
    cel_s20=/tmp/cel$$_s20
    try_err=${cel_s20}.err
    try_out=${cel_s20}.out
    trap "log_output $cspoc_tmp_log ${cel_s20}     eval $E_LSVG_CMD" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s20 $node -q1     eval $E_LSVG_CMD
	cel_rc=$(get_rc ${cel_s20} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	nls_msg -l $cspoc_tmp_log ${_MSET} 51 \
		    	"${_CMD}: Unable to determine the characteristics of volume group $DVG on node $CL_NODE\n" ${_CMD} $DVG $CL_NODE
		    	exit 1
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
# @(#)23        1.24 src/43haes/usr/sbin/cluster/cspoc/plans/cl_mk_mndhb_lv.cel, hacmp.cspoc, 61haes_r714 4/17/12 17:47:12
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/cspoc/plans/cl_mk_mndhb_lv.cel 1.24 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 2007,2012 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# \$Id\$
###############################################################################
#   COMPONENT_NAME: CSPOC
#
# Name:
#       cl_mk_mndhb_lv.cel
#
# Description
#   This script will create a new Logical Volume for Multi-Node Disk Heartbeat.
#   The required parameters are:
#	Volume group name - VG to be created/modified
#	PVID - id of the disk to use for MNDHB
#	Logical volume name - name of the LV to be used for heartbeat 
#		(this is optional - generate a name if none specified)
#	Resource group name - resource group which includes the VG
#	Network name - name for the new diskhbmulti network
#
#   This script will perform the following:
#	Create the Volume Group if it does not already exist
#	Add the PVID to the Volume Group if it does already exist
#	Create the Logical Volume from the PVID
#	Add the Volume Group to the Resource Group
#	Create the specified MNDHB network
#
#   Usage: cl_mk_mndhb_lv -cspoc "[-f] [-g ResourceGroup]" [-f] [-L LVLabel] [-y VGname] [-e NetworkName] PVID <LVName>
#
#
# Return Values:
#       0       success
#       1       failure
#
###############################################################################
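#
# For illustration only - the node, volume group, network, PVID and LV names
# below are hypothetical - a typical invocation might look like:
#
#   cl_mk_mndhb_lv -cspoc "-n nodeA,nodeB" -y mndhb_vg -e net_diskhbmulti_01 \
#       00c8d1a2b3c4d5e6 mndhb_lv01
#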
###############################################################################
# Start of main script
###############################################################################
# Include the PATH and PROGNAME initialization stuff
# @(#)69        1.8  src/43haes/usr/sbin/cluster/cspoc/plans/cl_path.cel, hacmp.cspoc, 61haes_r720, 1539B_hacmp720 9/10/15 13:28:25
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/cspoc/plans/cl_path.cel 1.8 
#  
# Licensed Materials - Property of IBM 
#  
# Restricted Materials of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1999,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
################################################################################
#   COMPONENT_NAME: CSPOC
#
# Name:
#       cl_path.cel
#
# Description:
#       C-SPOC Path Initialization Routine.  This routine is to be included
#       in all C-SPOC Execution Plans (e.g. '%include cl_path.cel').
#       It sets up the PATH environment variable to prevent hardcoding of
#       path names in the CSPOC code.
#
# Arguments:
#       None.
#
# Return Values:
#	None.
#
# Environment Variables Defined:
#
#   PUBLIC:
#	PROGNAME Represents the name of the program 
#	HA_DIR Represents the directory the HA product is shipped under.
#
################################################################################
PROGNAME=${0##*/}
PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
# set the HA_DIR env variable to the HA directory
HA_DIR="es"
# Set up useful prompt for when 'set -x' is turned on through _DEBUG
if [[ -n $_DEBUG ]] && (( $_DEBUG == 9 ))
then
    PS4='${PROGNAME:-$_CMD}[$LINENO]: '
    set -x
fi
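# For example (hypothetical invocation), exporting _DEBUG=9 before running
# the command enables ksh tracing with the PS4 prompt set above:
#
#   _DEBUG=9 cl_mk_mndhb_lv -cspoc "-n nodeA" ...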
[[ -n $_DEBUG ]] &&
    print "DEBUG Entering $PROGNAME version 1.22"
# Initialize variables
_CMD_NAME=${0##*/}
_CSPOC_OPT_STR="f?g:n:"
# Defect 672184
# Update the -e flag to be a mandatory flag with an argument
_OPT_STR="+1L:f?y:e^V:r:s:"
_USAGE="$(dspmsg -s 126 cspoc.cat 9999 \
'Usage: cl_mk_mndhb_lv -cspoc \"[-f] [-g ResourceGroup] [-n NodeList]\" [-f] [-r ResourceGroup] [-V MajorNumber] [-L LVLabel] [-y VGname] [-e NetworkName] PVID <LVName>')"
_MSET=126
# This script requires HA 5.3.0.0 or higher
_VER="5300"
_VERSION="5.3.0.0"
# Include CELPP init code and verification routines.
#  ALTRAN_PROLOG_BEGIN_TAG                                                    
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                      
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog.
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/cspoc/plans/cl_init.cel 1.16.7.9 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1996,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/plans/cl_init.cel, 726, 2147A_aha726, Feb 05 2021 09:50 PM
################################################################################
#
# COMPONENT_NAME: CSPOC
#
# Name:
#       cl_init.cel
#
# Description:
#       C-SPOC Initialization Routine.  This routine is to be included
#       in all C-SPOC Execution Plans (e.g. '%include cl_init.cel').
#       It defines the ksh functions required to implement C-SPOC commands.
#
# Arguments:
#       None.
#
# Return Values:
#       None.
#
# Environment Variables Defined:
#
#   PUBLIC:
#       _OPT_STR            Specifies the list of valid command flags.
#                           Must be specified in the execution plan.
#
#       _CSPOC_OPT_STR      Specifies the list of valid CSPOC flags.
#                           Must be specified in the execution plan.
#
#       cspoc_tmp_log       Full path of the cspoc log file
#                           (/var/hacmp/log/cspoc.log).
#
#       _CLUSTER_NODES      A comma separated list of all nodes in the cluster.
#
#       _NODE_LIST          A comma separated list of nodes from the command
#                           line (i.e. Those specified by -n or implied by -g).
#
#       _TARGET_NODES       A comma separated list that specify the target
#                           nodes for a generated C-SPOC script.
#
#       BADNODES            A space-separated list that specifies the nodes
#                           that are either not defined in the cluster or not
#                           reachable for a generated C-SPOC script.
#
#       _RES_GRP            The resource group specified by -g on the
#                           command line
#
#       _SPOC_FORCE         Set to "Y" when -f specified.  Otherwise not set.
#
#       _DEBUG              Set to <debug_level> when -d specified.
#                           Otherwise not set.
#
#       _CMD_ARGS           The AIX Command Options and arguments from the
#                           C-SPOC command
#
#       _NUM_CMD_ARGS       The number of AIX Command Options and arguments
#                           from the C-SPOC command
#
#       _NON_FLG_ARGS       The non-flag arguments from the C-SPOC command.
#
#       _OF_NA              A list of the optional command flags specified
#                           that do NOT require an option argument.
#
#       _MF_NA              A list of the mandatory command flags specified
#                           that do NOT require an option argument.
#
#       _OF_WA              A list of the optional command flags specified
#                           that require an option argument.
#
#       _MF_WA              A list of the mandatory command flags specified
#                           that require an option argument.
#
#       _VALID_FLGS         A list of valid command flags.
#
#       _CSPOC_OPTS         The CSPOC Options specified on the command line
#                           following the '-cspoc' flag.
#
#       _CSPOC_OF_NA        A list of the optional CSPOC flags specified that
#                           do NOT require an option argument.
#
#       _CSPOC_MF_NA        A list of the mandatory CSPOC flags specified that
#                           do NOT require an option argument.
#
#       _CSPOC_OF_WA        A list of the optional CSPOC flags specified that
#                           require an option argument.
#
#       _CSPOC_MF_WA        A list of the mandatory CSPOC flags specified that
#                           require an option argument.
#
#       _CSPOC_VALID_FLGS   A list of valid CSPOC flags for this CSPOC command.
#
#       CLUSTER_OVERRIDE    Flag to Cluster Aware AIX Commands to signal that
#                           base AIX commands should be allowed to operate.
#                           Applies to 7.1.0 and later.
#
################################################################################
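#
# A plan typically drives the routines defined below along these lines
# (illustrative sketch, not generated code):
#
#   _getopts "$_CSPOC_OPT_STR" "$_OPT_STR" "$@" || exit 2   # parse command line
#   _get_node_list     || exit 1    # populate _CLUSTER_NODES
#   _get_target_nodes  || exit 1    # populate _TARGET_NODES
#   _cspoc_verify      || [[ -n $_SPOC_FORCE ]] || exit 1   # -f overrides soft errors
#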
################################################################################
#
# _get_node_list
#
# DESCRIPTION:
#   Generates _CLUSTER_NODES, a comma separated list of all nodes in the cluster.
#
################################################################################
function _get_node_list
{
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _get_node_list version 1.16.7.9"
        if (( $_DEBUG >= 8 )); then
            typeset PROGNAME="_get_node_list"
            set -x
        fi
    fi
    unset _CLUSTER_NODES
    typeset NODE IP_ADDR
    #
    : GET A comma separated LIST OF ALL NODES IN THE CLUSTER
    #
    _CLUSTER_NODES=$(IFS=, set -- $(clodmget -q "object = COMMUNICATION_PATH" -f name -n HACMPnode) ; print "$*")
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: CLUSTER NODES [${_CLUSTER_NODES}]"
        print "DEBUG: Leaving _get_node_list"
    fi
    #
    : ENSURE THAT NODES WERE FOUND FOR THE CLUSTER
    #
    if [[ -z ${_CLUSTER_NODES} ]]; then
        nls_msg -2 21 6 \
            "${_CMD}: The cluster does not appear to be configured - no nodes are defined.  \n  Configure the cluster, nodes and networks then try this operation again.\n" $_CMD
        return 1
    fi
    return 0
} # End of "_get_node_list()"
################################################################################
#
# _get_target_nodes
#
# DESCRIPTION
#   Sets environment variable $_TARGET_NODES to the list of cluster nodes
#   on which the C-SPOC command is to be executed.
#
#	1 - If a node list was specified $_TARGET_NODES is set to
#	    the nodes listed.
#
#	2 - If a resource group was specified $_TARGET_NODES is set
#	    to the list of nodes that are participating in that
#	    resource group.
#
#	3 - If neither a node list nor a resource group has been specified
#	    then $_TARGET_NODES is set to a list of all nodes in the cluster.
#
################################################################################
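#
#   For example (hypothetical values): with '-cspoc "-n nodeA,nodeB"' the
#   result is _TARGET_NODES=nodeA,nodeB; with '-cspoc "-g rg1"' it is the
#   nodes participating in resource group rg1; with neither, it defaults to
#   $_CLUSTER_NODES.
#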
function _get_target_nodes
{
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _get_target_nodes version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_target_nodes"
            set -x
        fi
    fi
    typeset NODE=""
    integer GTN_RC=-1
    #
    : If given a node list, or the nodes in a resource group, use those
    #
    if [[ -n $_NODE_LIST || -n $_RG_NODE_LIST ]]
    then
        _TARGET_NODES=$(IFS=, set -- $_NODE_LIST $_RG_NODE_LIST ; print "$*")
        GTN_RC=0
    #
    : If no node list given, assume all cluster nodes, if we can find them
    #
    elif [[ -n $_CLUSTER_NODES ]]
    then
        _TARGET_NODES="$_CLUSTER_NODES"
        GTN_RC=0
    #
    : Else cannot figure out where to run this
    #
    else
        nls_msg -2 -l ${cspoc_tmp_log} 4 6 \
        "%s: Unable to determine target node list!\n" "$_CMD"
        GTN_RC=1
    fi
    return $GTN_RC
} # End of "_get_target_nodes()"
################################################################################
#
# _get_rgnodes
#
# DESCRIPTION
#   Gets a list of nodes associated with the resource group specified.
#
################################################################################
function _get_rgnodes
{
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _get_rgnodes version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_rgnodes"
            set -x
        fi
    fi
    if [[ -z $1 ]]
    then
        nls_msg -2 -l ${cspoc_tmp_log} 4 9 \
            "%s: _get_rgnodes: A resource group must be specified.\n" "$_CMD"
        return 1
    fi
    _RG_NODE_LIST=$(clodmget -q "group = $1" -f nodes -n HACMPgroup)
    if [[ -z $_RG_NODE_LIST ]]
    then
        nls_msg -2 -l ${cspoc_tmp_log} 4 50 \
            "%s: Resource group %s not found.\n" "$_CMD" "$1"
        return 1
    fi
    return 0
} # End of "_get_rgnodes()"
#######################################################################
#
# _getopts
#
# DESCRIPTION
#   Parses command line options for C-SPOC commands.
#
#######################################################################
#
# OPTION STRING
#   The _getopts() routine requires the execution plan to define the
#   environment variable $_OPT_STR which is referred to as the option
#   string.  The option string is used to define valid and/or required
#   flags, the required number of non-flag arguments, and what flags
#   may or may not be specified together.
#
#    Operator   Description                                  Example
#    --------   ------------------------------------------   ---------
#	()	Groups mutually required flags               (c!d:)
#	[]	Groups mutually exclusive flags              [f,b,]
#
#	?	Optional flag (default)                      b?
#	!	Mandatory flag                               c!
#
#	:	Optional flag that requires an argument      d:
#	^	Mandatory flag that requires an argument     e^
#
#	.	Optional multi-byte flag
#	,	Mandatory multi-byte flag                    f,
#
#	+N	Indicates that N non-flag arguments are      +2
#               required.  It must be at the beginning of
#               the option string.
#
#   Notes:
#	1 - A flag that can be specified with or without an argument
#           would be specified twice as follows: _OPT_STR="a?a:"
#
#	2 - A flag that requires an argument cannot also be the first
#           letter of a multi-byte flag.  (i.e. -b arg -boot ) as there
#           is no way to differentiate between the two.
#
#  Example:
#    The following option string would correspond to the usage below
#    In the usage '[]' indicates optional flags and '()' indicates
#    grouping.
#
#	_OPT_STR="+2ab?(c!d:)e^[f,b,]g."
#
#    Usage:
#     cmd [-a] [-b] -c [-d arg] -e arg ( -foo | -bar ) [-go] arg1 arg2 [arg3]
#
#
#######################################################################
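#
#  As a concrete reading, this plan's own option string defined above,
#  _OPT_STR="+1L:f?y:e^V:r:s:", decodes as: at least one non-flag argument
#  is required (the PVID); -L, -y, -V, -r and -s are optional flags that
#  each take an argument; -f is an optional flag without an argument; and
#  -e is a mandatory flag that takes an argument.
#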
function _getopts
{
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _getopts 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_opts"
            set -x
        fi
    fi
    typeset CMD=${0##*/}
    # unset the following variables to avoid these variables being
    # influenced implicitly by external environment. Note that we will
    # not unset/touch _DEBUG since it is being checked even before hitting
    # this part of the code. i.e. depending upon the _DEBUG flag we set
    # set -x option initially itself.
    unset _NODE_LIST
    unset _RES_GRP
    unset _CSPOC_QUIET
    # LOCAL VARIABLES
    typeset _OPT_STR _CSPOC_OPT_STR OPT X Y
    typeset _VALID_FLGS _CSPOC_VALID_FLGS
    typeset _OF_NA _MF_NA _OF_WA _MF_WA
    typeset _CSPOC_OF_NA _CSPOC_MF_NA _CSPOC_OF_WA _CSPOC_MF_WA
    typeset _GOPT=no _NOPT=no
    # THE FIRST TWO ARGS MUST BE OPTION STRINGS
    _CSPOC_OPT_STR=$1
    _OPT_STR=$2
    shift 2
    # CHECK CSPOC OPT STRING SPECIFIED IN THE EXECUTION PLAN
    # FOR OPTIONAL OR REQUIRED FLAGS
    [[ $_CSPOC_OPT_STR == *g^* ]] && _GOPT=req
    [[ $_CSPOC_OPT_STR == *g:* ]] && _GOPT=opt
    [[ $_CSPOC_OPT_STR == *n^* ]] && _NOPT=req
    [[ $_CSPOC_OPT_STR == *n:* ]] && _NOPT=opt
    # CHECK IF THE OPTION STRINGS SPECIFY A REQUIRED NUMBER OF NON-FLAG ARGS
    if [[ $_OPT_STR == +* ]]
    then
        X=${_OPT_STR#??}
        Y=${_OPT_STR%"$X"}
        _OPT_STR=$X
        _NUM_ARGS_REQ=${Y#?}
    fi
    # PARSE THE OPTION STRING ($_OPT_STR) INTO FIVE LISTS
    #  ${_OF_NA} is a list of optional flags that DO NOT take an option arg.
    #  ${_MF_NA} is a list of mandatory flags that DO NOT take an option arg.
    #  ${_OF_WA} is a list of optional flags that DO take an option argument
    #  ${_MF_WA} is a list of mandatory flags that DO take an option argument
    #  ${_VALID_FLGS} is a list of all valid flags.
    # Note that both strings start and end with a space (to facilitate grepping)
    # and contain a list of space separated options each of which is preceded
    # by a minus sign.
    # THE FOLLOWING WHILE LOOP SIMPLY ORGANIZES THE VALID FLAGS INTO
    # FOUR LISTS THAT CORRESPOND TO THE FOUR FLAG TYPES LISTED ABOVE
    # AND A FIFTH LIST THAT INCLUDES ALL VALID FLAGS.
    X=${_OPT_STR}
    [[ $X == '-' ]] && X=""
    while [[ -n ${X} ]]
    do
        # GET THE NEXT LETTER OF THE OPTION STRING
        Y=${X#?}
        OPT=${X%"$Y"}
        X=${Y}
        # CHECK FOR AND PROCESS MUTUALLY REQUIRED OR MUTUALLY EXCLUSIVE FLAGS
        case $OPT in
            '(') # STARTS A GROUP OF MUTUALLY REQUIRED FLAGS
                 if [[ -n $MUTREQ ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character '('"
                     return 1
                 fi
                 MUTREQ=Y
                 continue
            ;;
            ')') # ENDS A GROUP OF MUTUALLY REQUIRED FLAGS
                 if [[ -z $MUTREQ ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character ')'"
                     return 1
                 fi
                 MUTREQ=""
                 MUTREQ_FLAGS=$MUTREQ_FLAGS" "
                 continue
            ;;
            '[') # STARTS A GROUP OF MUTUALLY EXCLUSIVE FLAGS
                 if [[ -n $MUTEX ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character '['"
                     return 1
                 fi
                 MUTEX=Y
                 continue
            ;;
            ']') # ENDS A GROUP OF MUTUALLY EXCLUSIVE FLAGS
                 if [[ -z $MUTEX ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character ']'"
                     return 1
                 fi
                 MUTEX=""
                 MUTEX_FLAGS=$MUTEX_FLAGS" "
                 continue
            ;;
        esac
        # KEEP A LIST OF MUTUALLY EXCLUSIVE FLAGS
        if [[ -n $MUTEX && $MUTEX_FLAGS != *${OPT}* ]]; then
            MUTEX_FLAGS=${MUTEX_FLAGS}${OPT}
        fi
        # KEEP A LIST OF MUTUALLY REQUIRED FLAGS
        if [[ -n $MUTREQ && $MUTREQ_FLAGS != *${OPT}* ]]; then
            MUTREQ_FLAGS=${MUTREQ_FLAGS}${OPT}
        fi
        # KEEP A LIST OF ALL VALID FLAGS
        _VALID_FLGS="${_VALID_FLGS} -$OPT"
        # DETERMINE THE FLAG TYPE AS DESCRIBED ABOVE
        # ADD THE FLAG TO THE APPROPRIATE LIST AND
        # STRIP OFF THE FLAG TYPE IDENTIFIER FROM
        # THE OPTION STRING '${_OPT_STR}'.
        case $X in
            '.'*) # OPTIONAL MULTI-BYTE FLAG
                  X=${X#.}
                  _OF_MB="${_OF_MB} -$OPT"
            ;;
            ','*) # MANDATORY MULTI-BYTE FLAG
                  X=${X#,}
                  _MF_MB="${_MF_MB} -$OPT"
            ;;
            ':'*) # OPTIONAL FLAG THAT REQUIRES AN ARGUMENT
                  X=${X#:}
                  _OF_WA="${_OF_WA} -$OPT"
            ;;
            '^'*) # MANDATORY FLAG THAT REQUIRES AN ARGUMENT
                  X=${X#^}
                  _MF_WA="${_MF_WA} -$OPT"
            ;;
            '!'*) # MANDATORY FLAG
                  X=${X#!}
                  _MF_NA="${_MF_NA} -$OPT"
            ;;
            '?'*) # OPTIONAL FLAG
                  X=${X#?}
                  _OF_NA="${_OF_NA} -$OPT"
            ;;
            *)    # OPTIONAL FLAG
                  _OF_NA="${_OF_NA} -$OPT"
            ;;
        esac
    done # End of the option "while" loop
    # TACK A SPACE ONTO THE END OF EACH LIST TO MAKE OPTION GREPPING SIMPLE
    _VALID_FLGS=$_VALID_FLGS" "
    _OF_NA=$_OF_NA" " ; _OF_WA=$_OF_WA" " ; _OF_MB=$_OF_MB" "
    _MF_NA=$_MF_NA" " ; _MF_WA=$_MF_WA" " ; _MF_MB=$_MF_MB" "
    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 3 )) 
    then
        print "DEBUG(3): _OF_NA=$_OF_NA"
        print "DEBUG(3): _MF_NA=$_MF_NA"
        print "DEBUG(3): _OF_WA=$_OF_WA"
        print "DEBUG(3): _MF_WA=$_MF_WA"
        print "DEBUG(3): _OF_MB=$_OF_MB"
        print "DEBUG(3): _MF_MB=$_MF_MB"
        print "DEBUG(3): _VALID_FLGS=$_VALID_FLGS"
    fi
    # PARSE THE COMMAND LINE ARGS
    let _NUM_CMD_ARGS=0
    while [[ -n $* ]]
    do
        THIS_FLAG=$1
        THIS_ARG=${THIS_FLAG#??}
        THIS_FLAG=${THIS_FLAG%"$THIS_ARG"}
        if [[ -n $_DEBUG ]]
        then
            print "THIS_FLAG=\"$THIS_FLAG\""
            print "THIS_ARG=\"$THIS_ARG\""
        fi
        if [[ $1 == '-cspoc' ]]
        then
            #
            :   Check for and process any CSPOC flags
            #
            _CSPOC_OPTS=$2
            if [[ -z $_CSPOC_OPTS || $_CSPOC_OPTS == *([[:space:]]) ]]
            then
                SHIFT=1
            else
                SHIFT=2
                while getopts ':fd#n:?g:q' _CSPOC_OPTION $_CSPOC_OPTS 
                do
                    case $_CSPOC_OPTION in
                        f ) :   Force option
                            export _SPOC_FORCE=Y
                        ;;
                        d ) :   Debug level
                            export _DEBUG=$OPTARG
                        ;;
                        n ) :   Target node list
                            export _NODE_LIST=$(print $OPTARG | sed -e"s/['\"]//g")
                        ;;
                        g ) :   Target resource group 
                            export _RES_GRP=$(print $OPTARG | sed -e"s/['\"]//g")
                        ;;
                        q ) :   Suppress output to stdout
                            export _CSPOC_QUIET=YES
                        ;;
                        : ) :   Missing operand - ignored
                        ;;
                        * ) :   Invalid flag specified
                            nls_msg -2 -l ${cspoc_tmp_log} 4 13 \
                                "%s: Invalid C-SPOC flag [%s] specified.\n" \
                                "$_CMD" "$_CSPOC_OPTION"
                            print "$_USAGE"
                            exit 2
                        ;;
                    esac
                done
            fi
            #
            :   Validate required and mutually exclusive CSPOC operands
            #
            if [[ $_GOPT == "no" && -n $_RES_GRP ]]
            then
                #
                :   Is "-g" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 60 \
                    "%s: C-SPOC -g flag is not allowed for this command.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_NOPT == "no" && -n $_NODE_LIST ]]
            then
                #
                :   Is "-n" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 61 \
                    "%s: C-SPOC -n flag is not allowed for this command.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_GOPT == "req" && $_NOPT == "req" ]] && \
                 [[ -z $_RES_GRP && -z $_NODE_LIST ]]
            then    
                #
                :   Check for "-g" or "-n" present when one
                :   or the other is required
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 62 \
                    "%s: Either the '-g' or the '-n' C-SPOC flag must be specified.\n" "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ -n $_RES_GRP && -n $_NODE_LIST ]]
            then
                #
                :   Check that both "-g" and "-n" are not specified together
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 63 \
                    "%s: C-SPOC -g and -n flags are mutually exclusive.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_NOPT != "req" && $_GOPT == "req" && -z $_RES_GRP ]]
            then
                #
                :   Is only "-g" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 64 \
                    "%s: C-SPOC -g flag is required.\n" "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_GOPT != "req" && $_NOPT == "req" && -z $_NODE_LIST ]]
            then
                #
                :   Is only "-n" required
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 65 \
                    "%s: C-SPOC -n flag is required.\n" "$_CMD"
                print "$_USAGE"
                return 2
            fi
            shift $SHIFT
        elif [[ "$THIS_FLAG" != -* ]]
        then
            #  AIX COMMAND ARGUMENT THAT IS NOT AN OPTION FLAG
            #  NEED TO ACCOMMODATE OPTIONS THAT MAY OR MAY NOT HAVE AN ARGUMENT.
            #  IF OPT_ARG DOESN'T START WITH A '-' IT'S AN ARGUMENT, OTHERWISE
            #  CONSIDER IT TO BE THE NEXT OPTION
            let _NUM_CMD_ARGS=$_NUM_CMD_ARGS+$#
            TMP_FLAG=""
            while (( $# > 0 ))
            do
                case "$1" in
                    -*) TMP_FLAG=$(echo $1 | cut -c1-2)
                        _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$TMP_FLAG"
                        TMP_ARG1=$(echo $1 | cut -c3-)
                        if [[ -n $TMP_ARG1 ]] 
                        then
                            TMP_ARG1="$(print -- $TMP_ARG1 |\
                                        clencodearg $_ENCODE_ARGS)"
                            _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$TMP_ARG1"
                            TMP_FLAG=""
                        fi
                    ;;
                    *) TMP_ARG2="$(print -- $1 | clencodearg $_ENCODE_ARGS)"
                       _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}${TMP_ARG2}
                       if [[ -z $TMP_FLAG ]]
                       then
                           _NON_FLG_ARGS=${_NON_FLG_ARGS:+"${_NON_FLG_ARGS} "}"${TMP_ARG2}"
                       fi
                       TMP_FLAG=""
                esac
                shift
            done
            break
        else	# COME INTO HERE WITH $THIS_FLAG and $THIS_ARG SET
            ARG_CHECK=Y
            ARG_NEXT=""
            while [[ -n $ARG_CHECK ]]
            do
                # NOW CHECK IF WE STILL HAVE MORE FLAGS TO PROCESS
                [[ -z $THIS_ARG ]] && ARG_CHECK=""
                if print -- "$_OF_MB $_MF_MB" | grep -- "$THIS_FLAG" > /dev/null
                then
                    # THIS IS A MULTI-BYTE FLAG
                    if [[ -z $THIS_ARG ]]
                    then
                        ( print -- "$_OF_NA $_MF_NA" | grep -- "$THIS_FLAG" > /dev/null ) || \
                        {
                            # THIS FLAG REQUIRES AN ARGUMENT
                            nls_msg -2 -l ${cspoc_tmp_log} 4 19 \
                                "%s: Invalid option [%s].\n" "$_CMD" "$1"
                            print "$_USAGE"
                            exit 2
                        }
                    fi
                    # VALID AIX COMMAND MULTI-BYTE OPTION (WITHOUT AN ARGUMENT)
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG$THIS_ARG"
                    shift
                    ARG_CHECK=""	# Disable further processing of $THIS_ARG as flags
                elif print -- "$_OF_WA $_MF_WA" | grep -- "$THIS_FLAG" > /dev/null
                then
                    # THIS IS A FLAG THAT REQUIRES AN ARGUMENT
                    # HANDLE OPTIONAL SPACE BETWEEN FLAG AND ITS ARG
                    if [[ -z $THIS_ARG && -z $ARG_NEXT ]]
                    then
                        THIS_ARG=$2		# THERE WAS A SPACE
                        SHIFT=2
                    else
                        SHIFT=1		# THERE WAS NO SPACE
                    fi
                    # NOW VALIDATE THAT WE HAVE AN ARG AND THAT IT IS VALID
                    if [[ -z $THIS_ARG || $THIS_ARG == -* ]]
                    then
                        # IF THERE IS NO ARG THEN CHECK IF FLAG MAY BE SPECFIED WITHOUT ONE
                        print -- "$_OF_NA $_MF_NA" | grep -q -- "$THIS_FLAG" ||\
                        {
                            # THIS FLAG REQUIRES AN ARGUMENT
                            nls_msg -2 -l ${cspoc_tmp_log} 4 19 \
                            "%s: Option [%s] requires an argument.\n" "$_CMD" "$1"
                            print "$_USAGE"
                            exit 2
                        }
                    fi
                    # VALID AIX COMMAND OPTION WITH AN ARGUMENT
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG $(print -- $THIS_ARG | clencodearg $_ENCODE_ARGS)"
                    shift $SHIFT
                    # Disable further processing of $THIS_ARG as flags
                    ARG_CHECK=""
                elif print -- "$_OF_NA $_MF_NA" | grep -q -- "$THIS_FLAG"
                then
                    # THIS IS A FLAG THAT DOES NOT TAKE AN ARGUMENT
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG"
                    # IF THIS FLAG WAS OBTAINED FROM $THIS_FLAG THEN WE WANT TO
                    # SHIFT. IF IT WAS OBTAINED FROM $THIS_ARG THEN WE DONT
                    [[ -z $ARG_CHECK ]] && shift
                    # THIS FLAG DOES NOT TAKE AN OPTION ARGUMENT SO ASSUME
                    # THAT "$THIS_ARG" SPECIFIES MORE FLAGS TO PROCESS.
                    if [[ -n $THIS_ARG ]]
                    then
                        # GET THE NEXT FLAG, ADJUST $THIS_ARG,
                        # AND KEEP PROCESSING.
                        X=${THIS_ARG#?}
                        THIS_FLAG="-${THIS_ARG%$X}"
                        THIS_ARG=$X
                        ARG_NEXT=Y
                    fi
                else
                    nls_msg -2 -l ${cspoc_tmp_log} 4 26 \
                    "%s: Invalid option [%s].\n" "$_CMD" "$1"
                    print "$_USAGE"
                    exit 2
                fi
            done
        fi
    done
    ##
    # PERFORM CHECKING OF THE AIX COMMAND FLAGS
    ##
    # CHECK FOR REQUIRED NUMBER OF NON-FLAG ARGUMENTS
    if (( ${_NUM_CMD_ARGS:-0} < ${_NUM_ARGS_REQ:-0} ))
    then
        nls_msg -2 -l ${cspoc_tmp_log} 4 27 \
            "%s: Missing command line arguments.\n" "$_CMD"
        print "$_USAGE"
        return 2
    fi
    # THIS IS WHERE WE CHECK FOR MANDATORY FLAGS, MUTUALLY EXCLUSIVE FLAGS,
    # AND MUTUALLY REQUIRED FLAGS
    # CHECK FOR MUTUALLY REQUIRED FLAGS
    # FOR EACH GROUP OF FLAGS SPECIFIED IN $MUTREQ_FLAGS WE WILL COUNT HOW
    # MANY WE NEED AND HOW MANY ARE GIVEN ON CMD LINE.  IF THESE VALUES ARE
    # NOT EQUAL PRINT AN ERROR AND RETURN NON-ZERO
    typeset -i CNT=0 N=0
    for GROUP in $MUTREQ_FLAGS
    do
        # GET A COUNT OF HOW MANY FLAGS IN THIS GROUP
        print -n $GROUP | wc -c | read N
        integer CNT=0
        F=""
        while [[ -n $GROUP ]]
        do
            # GET THE NEXT FLAG IN THE GROUP
            A=${GROUP#?}
            B=${GROUP%"$A"}
            GROUP=$A
            # IF THIS FLAG IS USED INCREMENT THE COUNTER
            if [[ "$(print -- $_CMD_ARGS | grep -- '-'${B})"' ' != ' ' ]]
            then
                (( CNT = CNT + 1 ))
            fi
            F=${F:+"$F, "}"-"$B
        done
        # VERIFY THAT THE COUNTER EQUALS THE TOTAL NUMBER OF FLAGS IN THE GROUP
        if (( $CNT != $N ))
        then
            print "$_CMD: One or more flags [$F] were not specified."
            print "$_CMD: Specifying any one of these flags requires the others."
            return 2
        fi
    done
    # CHECK FOR MUTUALLY EXCLUSIVE FLAGS
    # FOR EACH GROUP OF FLAGS SPECIFIED IN $MUTEX_FLAGS WE WILL COUNT HOW
    # MANY ARE GIVEN ON CMD LINE.  IF MORE THAN ONE IS GIVEN THEN PRINT
    # AN ERROR AND RETURN NON-ZERO
    for GROUP in $MUTEX_FLAGS
    do
        # GET A COUNT OF HOW MANY FLAGS IN THIS GROUP
        integer CNT=0
        F=""
        while [[ -n $GROUP ]]
        do
            # GET THE NEXT FLAG IN THE GROUP
            A=${GROUP#?}
            B=${GROUP%"$A"}
            GROUP=$A
            # IF THIS FLAG IS USED INCREMENT THE COUNTER
            if [[ -n "$(print -- $_CMD_ARGS | grep -- '-'${B})" ]]
            then
                (( CNT = CNT + 1 ))
            fi
            F=${F:+"$F, "}"-"$B
        done
        # VERIFY THAT THE COUNTER EQUALS THE TOTAL NUMBER OF FLAGS IN THE GROUP
        if (( $CNT > 1 ))
        then
            print "$_CMD: The flags [$F] are mutually exclusive."
            print "$_CMD: Only one of these flags may be specified."
            return 2
        fi
    done
    # CHECK FOR ALL MANDATORY FLAGS
    for X in $_MF_NA $_MF_WA
    do
        # CHECK THAT MANDATORY FLAG IS ON COMMAND LINE
        if [[ -z "$(print -- $_CMD_ARGS | grep -- ${X})" ]]
        then
            # THE FLAG WAS NOT SPECIFIED SO WE MUST FIRST CHECK IF ANOTHER
            # FLAG WAS SPECIFIED THAT IS MUTUALLY EXCLUSIVE WITH THIS ONE.
            for GROUP in $MUTEX_FLAGS
            do
                OK=""
                while [[ -n $GROUP ]]
                do
                    Y=${GROUP#?}
                    Z=${GROUP%"$Y"}
                    GROUP=$Y
                    print -- " $_CMD_ARGS " |\
                        grep -- "-${Z} " > /dev/null && OK=Y
                done
                [[ -n $OK ]] && break
            done
            # "$OK" IS NULL IF NO FLAG IN THIS MUTEX GROUP WAS GIVEN
            if [[ -z $OK ]]
            then
                nls_msg -2 -l ${cspoc_tmp_log} 4 29 \
                "%s: Mandatory option [%s] not specified.\n" "$_CMD" "$X"
                print "$_USAGE"
                return 2
            fi
        fi
    done
    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 3 )) 
    then
        print -- "DEBUG(3): _CMD_ARGS=$_CMD_ARGS"
    fi
    return 0
} # End of "_getopts()"
################################################################################
#
# DESCRIPTION:
#   Updates the C-SPOC logfile
#
################################################################################
function cexit
{
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering cexit version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME=cexit
            set -x
        fi
    fi
    typeset USAGE="USAGE: cexit <temp_log_file> <return_code>"
    # CHECK USAGE
    (( $# != 2 )) && print "$_CMD: $USAGE"
    typeset TEMP_LOG=$1
    typeset RC=$2
    #
    : Read the HACMPlogs ODM for the pathname of the cspoc.log log file
    : If the ODM is empty or corrupted, use /var/hacmp/log/cspoc.log
    #
    DESTDIR=$(clodmget -q "name = cspoc.log" -f value -n HACMPlogs)
    if [[ -n $DESTDIR ]]
    then
        CSPOC_LOG="$DESTDIR/cspoc.log"
    else
        dspmsg scripts.cat 463 "The cluster log entry for %s could not be found in the HACMPlogs ODM.\n" "cspoc.log"
        dspmsg scripts.cat 464 "Defaulting to log directory %s for log file %s.\n" "/var/hacmp/log" "cspoc.log"
        CSPOC_LOG="/var/hacmp/log/cspoc.log"
    fi
    #
    : CHECK ARGS
    #
    if [[ ! -f ${TEMP_LOG} ]]
    then
        nls_msg -2 -l ${CSPOC_LOG} 4 39 \
            "%s: Unable to open file: %s\n" "${TEMP_LOG}" "$_CMD"
        return 1
    fi
    #
    :  If the log file does not exist, create it.
    #
    if [[ ! -f ${CSPOC_LOG} ]]; then
        touch ${CSPOC_LOG}
    fi
    #
    :  Keep the information in the log file if we have write permission
    #
    if [[ -w $CSPOC_LOG ]]
    then
        cat ${TEMP_LOG} >> $CSPOC_LOG
    fi
    if (( $RC == 0 )) && ( [[ -z $_DEBUG ]] || (( $_DEBUG <= 8 )) ) then
        rm -f ${TEMP_LOG%_*}*
        rm -f /tmp/cel$$_s*.err
        rm -f /tmp/cel$$_s*.out
        rm -f /tmp/cel$$.cache
    fi   
} # End of "cexit()"
################################################################################
#
# _cspoc_verify - Performs verification of a number of CSPOC requirements.
#                 Certain requirements, if not met, produce a hard error
#                 and the routine produces an immediate exit of the script.
#                 Other requirements, if not met, produce soft errors that
#                 result in the routine returning a value of '1'.  The
#                 calling script will then exit unless the CSPOC force flag
#                 has been set.
#
################################################################################
function _cspoc_verify
{
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _cspoc_verify version 1.16.7.9 + 20527,842,758"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_cspoc_verify"
            set -x
        fi
    fi
    typeset NODE 
    typeset bad_targets		#   space separated list of unreachable nodes
    typeset CAA_down_nodes	#   target hosts CAA says are down
    typeset CAA_node_name	#   CAA host node name
    integer _RETCODE=0		#   Assume OK until proven otherwise
    typeset BADNODES		#   Space separated list of invalid nodes
    typeset down_ha_nodes	#   target HA nodes CAA says are down
    typeset good_targets	#   target HA nodes that should work
    typeset bad_level_nodes	#   target HA nodes below minimum release level
    if [[ $_CSPOC_CALLED_FROM_SMIT != 'true' ]]
    then
	#
	:   If not called from SMIT, which will surely set things
	:   up correctly, check to make sure target nodes are valid.
	#
        for NODE in $(IFS=, set -- $_TARGET_NODES ; print $*)	
	do
	    #
	    :   Collect a list of given nodes that do not
	    :   show up in the local cluster definition.
	    #
	    if [[ $_CLUSTER_NODES != @(?(*,)$NODE?(,*)) ]]
	    then
		BADNODES=${BADNODES:+$BADNODES" "}$NODE
		nls_msg -2 -l ${cspoc_tmp_log} 4 44 \
		"%s: The node [%s] is not a part of this cluster.\n" "$_CMD" "$NODE"
	    fi
	done
	if [[ -n $BADNODES ]]
	then
	    #
	    :	Remove any invalid node names from the node list
	    #
	    save_targets=""
	    for ha_node in $(IFS=, set -- $_TARGET_NODES ; print $*)
	    do
		if [[ $BADNODES != @(?(* )${ha_node}?( *)) ]]
		then
		    save_targets=${save_targets:+"${save_targets},"}${ha_node}
		fi
	    done
	    _TARGET_NODES=$save_targets
	    if [[ -z $_TARGET_NODES ]]
	    then
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will not be run because all of the target nodes, %s, are not part of this cluster\n" "$_CMD" $LINENO "$BADNODES"
		exit 1	    #	No valid nodes found
	    else
		_RETCODE=1  #	Continue if 'forced' specified
	    fi
	fi
    fi
    cluster_version=$(clodmget -f cluster_version -n HACMPcluster)
    if [[ -x /usr/lib/cluster/incluster ]] && /usr/lib/cluster/incluster || \
       (( $cluster_version >= 15 )) 
    then
	#
	:   If at a level where CAA is in place, check to see if
	:   CAA can provide information on the state of nodes.
	#
	LC_ALL=C lscluster -m 2>/dev/null | \
	egrep 'Node name:|State of node:' | \
	cut -f2 -d: | \
	paste -d' ' - - | \
	while read CAA_node_name state
	do
	    if [[ -n $CAA_node_name ]]
	    then
		if [[ $state != 'UP' && \
		    $state != @(?(* )NODE_LOCAL?( *)) && \
		    $state != @(?(* )REACHABLE THROUGH REPOS DISK ONLY?( *)) &&  \
		    $state != 'DOWN  STOPPED' ]]
		then
		    #
		    #	The purpose of this check is to avoid long timeouts
		    #	trying to talk to a node known to be dead.
		    #	- The local node is always reachable
		    #	- A stopped node may be reachable; halevel checks below
		    #	- A node reachable only through the repository disk
		    #	  may be reachable: just because CAA declares the 
		    #	  network to be down doesn't mean clcomd can't get 
		    #	  through; halevel checks below
		    #
		    :   Node $CAA_node_name is 'DOWN' 
		    #
		    CAA_down_nodes=${CAA_down_nodes:+"${CAA_down_nodes} "}${CAA_node_name}
		    #
		    :   Find the PowerHA node name corresponding to the
		    :   $CAA_node_name - the name must be a label on an
		    :   interface on some node.
		    #
		    host_ip=$(LC_ALL=C host $CAA_node_name | cut -f3 -d' ')
		    host_ip=${host_ip%,}
		    if [[ -n $host_ip && $host_ip == @(+([0-9.])|+([0-9:])) ]]
		    then
			down_ha_node=$(clodmget -q "identifier = ${host_ip}" -f nodename -n HACMPadapter)
			if [[ -n $down_ha_node ]] 
			then
			    down_ha_nodes=${down_ha_nodes:+"$down_ha_nodes "}${down_ha_node}
			    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
			    "%s[%d]: The CAA lscluster command indicates that node %s[%s] is \"%s\" and not active.\n" "$_CMD" $LINENO $down_ha_node $CAA_node_name "$state"
			fi
		    fi
		fi
	    fi
	done
    fi
    if [[ -n $down_ha_nodes ]]
    then
	#
	:   CAA says that nodes $down_ha_nodes are not active
	:   Construct a list of the remaining nodes, to use to
	:   check to see if clcomd is running.
	#
	for ha_node in $(IFS=, set -- $_TARGET_NODES ; echo $* )
	do
	    if [[ $down_ha_nodes != @(?(* )${ha_node}?( *)) ]]
	    then
		good_targets=${good_targets:+"${good_targets} "}${ha_node}
	    fi
	done
    else
	#
	:   CAA gives no reason to suspect nodes are not reachable
	#
        good_targets=$(IFS=, set -- $_TARGET_NODES ; echo $* )
    fi
    #
    :   CAA has not ruled out talking to nodes $good_targets
    #
    if [[ -n $_SPOC_FORCE ]] && /usr/lib/cluster/incluster
    then
	#
	:   It is possible that the target node list contains names
	:   that do not correspond to CAA host names after the CAA
	:   cluster is created.  
	#   Before the CAA cluster is created, all target nodes are
	#   naturally not in a CAA cluster.  Ordinarily, this can be
	#   left to clhaver to find, though it does not distinguish
	#   between nodes it cannot connect to, and nodes that are
	#   not in the CAA cluster.  If the force flag was
	#   specified, and we are already in a CAA cluster, 
	:   Silently elide names in the target list that do not 
	:   correspond to CAA host names.
	#
	save_targets=$good_targets
	good_targets=""
	for given_node in $save_targets
	do
	    if cl_query_hn_id -q -i $given_node >/dev/null 2>&1
	    then
		good_targets=${good_targets:+"${good_targets} "}${given_node}
	    else
		print "$(date) ${_CMD}._cspoc_verify[$LINENO]: Given target \"$given_node\" cannot be converted to a CAA host name.  It will be skipped." >> $clutilslog
	    fi
	done
    fi 
    if [[ -n $good_targets ]]
    then
	#
	:	CAA thinks that nodes \"$good_targets\"
	:	are active.  See if clcomd can talk to them, 
	:	and what level of PowerHA is present.
	#
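	#
	#   clhaver output is one colon separated line per node, e.g.
	#   (hypothetical values):
	#	nodeA:caa_nodeA:7200
	#	nodeB::
	#   an empty second field means clcomd could not reach that node.
	#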
	clhaver -c $_VER $good_targets | \
	while IFS=: read ha_node caa_host VRMF
	do
	    if [[ -z $caa_host ]]
	    then
		#
		:   Add $ha_node, which clhaver cannot communicate to 
		:   through clcomd, to the list of nodes not to try 
		:   to run the command on.
		#
		down_ha_nodes=${down_ha_nodes:+"${down_ha_nodes} "}${ha_node}
	    elif (( $VRMF < $_VER ))
	    then
		#
		:   Add $ha_node to the list of nodes below the minimum
		:   HA release level.
		#
		bad_level_nodes=${bad_level_nodes:+"${bad_level_nodes} "}${ha_node}
	    fi
	done
	if [[ -n $bad_level_nodes ]]
	then
	    #
	    :   Nodes \"$bad_level_nodes\" report that they are running a
	    :   version of PowerHA below the required level $_VERSION
	    #
	    if [[ -z $_SPOC_FORCE ]]
	    then
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will not be run because the following nodes are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes"
	    elif [[ $bad_level_nodes == $good_targets ]]
	    then
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will not be run because all nodes are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes"
	    else
		#
		:   If force was specified, command processing continues
		:   but skips nodes \"$bad_level_nodes\"
		#
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will be run, but not on the following nodes, which are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes" 
	    fi
	    down_ha_nodes=${down_ha_nodes:+"${down_ha_nodes} "}${bad_level_nodes}
	    _RETCODE=1
	fi 
    fi
    if [[ -n $down_ha_nodes ]]
    then
	#
	:   The nodes in \$down_ha_nodes, \"$down_ha_nodes\", are not acceptable
	:   targets for this command, either because CAA says they are down,
	:   or clcomd cannot talk to them, or they are running a level of
	:   PowerHA that is too far back.  Remove them from the list of
	:   C-SPOC target nodes.
	#
	save_targets=""
	for ha_node in $good_targets
	do
	    if [[ $down_ha_nodes != @(?(* )${ha_node}?( *)) ]]
	    then
		save_targets=${save_targets:+"${save_targets} "}${ha_node}
	    fi
	done
	good_targets=$save_targets
	bad_targets=$(IFS=, set -- $down_ha_nodes ; print "$*" )
	if [[ -z $good_targets ]]
	then
	    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
	    "%s[%d]: The command will not be run because all of the target nodes, %s, are not reachable\n" "$_CMD" $LINENO "$bad_targets"
	    exit 1
	elif [[ -n $bad_targets ]]
	then
	    if [[ -z $_SPOC_FORCE ]]
	    then
		if [[ $bad_targets == @(*,*) ]]
		then
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will not be run because the target nodes, %s, are not reachable\n" "$_CMD" $LINENO "$bad_targets"
		else
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will not be run because the target node, %s, is not reachable\n" "$_CMD" $LINENO "$bad_targets"
		fi
		_RETCODE=1
	    else
		if [[ $bad_targets == @(*,*) ]]
		then
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will be run, but not on the unreachable nodes %s\n" "$_CMD" $LINENO "$bad_targets"
		else
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will be run, but not on the unreachable node %s\n" "$_CMD" $LINENO "$bad_targets"
		fi
	    fi
	fi
    fi
    _TARGET_NODES=$(IFS=, set -- $good_targets ; print "$*" )
    #
    :   \$_TARGET_NODES, \"$_TARGET_NODES\", is a list of nodes that are 
    :   up, contactable by clcomd, and running a reasonably up-to-date
    :   level of PowerHA.
    #
    return $_RETCODE
} # End of "_cspoc_verify()"
################################################################################
#
#   Start of main, Main, MAIN
#
################################################################################
if [[ -n $_DEBUG ]]
then
    print "\n[C-SPOC Initialization Started version 1.16.7.9"
fi
_VER=${_VER:-"6100"}
_VERSION=${_VERSION:-"6.1.0.0"}
export CLUSTER_OVERRIDE="yes"   # Allow CAA commands to run...      710
_CMD=${0##*/}
integer TRY_RC=0
#
: since root is needed to determine node lists and whatnot - clgetaddr -
: we may as well disable everything right here, right now.  By putting
: in an explicit check we can provide a more intuitive message rather
: than something about not being able to execute some command later on.
#
if [[ $(whoami) != "root" ]] && ! ckauth PowerHASM.admin
then
    nls_msg -2 -l ${cspoc_tmp_log} 4 52 \
    "%s: All C-SPOC commands require the user to either be root, or have PowerHASM.admin authorization\n" "$_CMD"
    exit 2
fi
#
: Set a default value, unless this script is called from SMIT, in which
: case _CSPOC_MODE will already be defined.  By default, this determines
: the request mode type.
#
export _CSPOC_MODE=${_CSPOC_MODE:-"both"}
#
: By default, assume that we are being called from the command line
#
export _CSPOC_CALLED_FROM_SMIT=${_CSPOC_CALLED_FROM_SMIT:-"false"}
#
: Make sure that the _CMD_ARGS variable is visible everywhere
#
export _CMD_ARGS=""
[[ -n $_DEBUG ]] && print "\n[Parsing Command Line Options ... ]"
#
:   Tell clencodearg to skip the special escape processing for '='
#
if [[ $SKIP_EQ_ESC == true ]]
then
    export _ENCODE_ARGS="-e"
else
    export _ENCODE_ARGS=""
fi
_CSPOC_OPT_STR=${_CSPOC_OPT_STR:--}
_OPT_STR=${_OPT_STR:--}
_getopts "$_CSPOC_OPT_STR" "$_OPT_STR" "$@" || exit 1
if [[ -n $_DEBUG ]]
then
    print "_CMD_ARGS=${_CMD_ARGS}"
    print "_NUM_CMD_ARGS=${_NUM_CMD_ARGS}"
    print "_NON_FLG_ARGS=${_NON_FLG_ARGS}"
    print "\n[Getting Cluster Node List ... ]"
fi
#
:   Determine the nodes in the cluster, and the nodes to which this operation
:   applies.
#
export ODMDIR=/etc/objrepos
_get_node_list || exit 1
_get_target_nodes || exit 1
if [[ -n $_DEBUG ]]
then
    print "_CLUSTER_NODES=${_CLUSTER_NODES}"
    print "\n[Verifying C-SPOC Requirements ... ]"
fi
if [[ -z $clutilslog ]]
then
   clutilslog=$(clodmget -q 'name = clutils.log' -f value -n HACMPlogs)"/clutils.log"
fi
#
:   If not all nodes are reachable, stop now, unless the "force" flag was
:   specified, implying continue despite unreachable nodes
#
_cspoc_verify || {
    [[ -z $_SPOC_FORCE ]] && exit 1
}
if [[ -n $_DEBUG ]]
then
    print "\n[C-SPOC Initialization Completed.]"
    print "DEBUG: Entering ${0##*/}"
    (( $_DEBUG >= 8 )) && set -x
fi
# Include the lvm utilities for physical volumes
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2020,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/cspoc/plans/lvm_utils.cel 1.61.1.8 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1998,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/plans/lvm_utils.cel, 726, 2147A_aha726, Feb 05 2021 09:50 PM 
#
###############################################################################
#
# _get_physical_volumes
#
# Grab the physical volume names from the command line, if any were provided.
# Also keep a record of the physical id's of those volumes from the reference
# node.
#
# Variables used:
#
#    _CMD_ARGS
#
# Variables set:
#
#    _DNAMES    -  space separated list of physical disk names
#    _EDNAMES   -  space separated list of encoded physical disk names
#    _REFNODE   -  the reference node provided by the user with -R
#
###############################################################################
function _get_physical_volumes
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _get_physical_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_get_physical_volumes
	    set -x
	}
    }
    typeset DISKS
    typeset ENODE
    typeset PV
    _DNAMES=""
    ENODE=""
    _REFNODE=""
    #
    :	If the -R switch was provided on the command line to identify a
    :	reference node, pick up that node name
    #
    ENODE=$(print -- $_CMD_ARGS | sed -n 's/.*\-R *\([^ ]*\).*/\1/p')
    [[ -n $ENODE ]] && {
	#
	:   Remove the -R switch, and its argument, from the command line
	:   and save away the reference node.
	#
	_CMD_ARGS=$(print -- $_CMD_ARGS | sed -e 's/\-R *[^ ]*//')
	_REFNODE=$(print -- $ENODE | cldecodearg)
    }
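    #
    #	A sketch with hypothetical encoded values: given
    #	    _CMD_ARGS="-f A1B2 -R C3D4 E5F6"
    #	the sed above leaves "-f A1B2  E5F6" in _CMD_ARGS, and _REFNODE
    #	gets the decoded form of C3D4.
    #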
    #
    :	At this point, the expectation is that the command as entered ended in
    :	a list of hdisk names.  These have been collected by cl_init into
    :	_NON_FLG_ARGS, so called because they are not preceded by a flag such
    :	as "-d"
    #
    DISKS=${_NON_FLG_ARGS##+([ ])}          # trim leading blanks
    #
    :	If no disks were provided, a reference node is redundant
    #
    if [[ -z $DISKS ]]
    then
	[[ -n $_REFNODE ]] && {
	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 23 "${_CMD}: No disks provided.  Ignoring -R option.\n" ${_CMD} 
	    _REFNODE=""			    # avoid processing reference node later
	}
    else
        #
	:   If disk names were given, trim them off of the string of the
	:   complete set of arguments to this command.  This is so that when
	:   they have been resolved relative to the reference node, they can
	:   just be appended back onto the string of other arguments.
        #
	_CMD_ARGS=${_CMD_ARGS%% ${DISKS}}
        #
        :   Physical volumes were provided - a reference node is required.
        :   That is, since hdisk names are not guaranteed unique across the
        :   cluster, we have to know on which node the names are meaningful.
        #
	[[ -z $_REFNODE ]] && {
	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 22 "${_CMD}: The -R switch is required when providing physical volumes.\n" ${_CMD} 
	    exit 1
	}
        #
        :   Create a space separated list of these physical disk names, in
        :   both encoded and decoded form.
        #
	for PV in $DISKS
	do
	    _EDNAMES=${_EDNAMES:+"${_EDNAMES} "}"${PV}"
	    _DNAMES=${_DNAMES:+"${_DNAMES} "}"$(print -- $PV | cldecodearg)"
	done
    fi
}
###############################################################################
#
# _verify_physical_volumes
#
# Verifies that the physical disks provided on the command line are valid
# for the volume group being operated upon.
#
# Arguments:
#
#    _VG        -  the volume group
#    _CHECK_VG  -  true if we should verify physical volumes against those that
#                  belong to the volume group.
#		-  false if we should verify that the given physical volumes
#		   belong to no volume group
#    _CHECK_ALL -  true if we should determine if the user selected all disks
#                  belonging to the volume group.
#    _NODE	-  Node (typically the reference node) on which the disk names
#		   are valid
#
# Variables used:
#
#    _DNAMES    -  the list of physical disk names
#
# Variables set:
#
#    _PVID_LIST - the list of PVID's for the provided physical disk names.
#    _EDNAMES   -  the list of encoded physical disk names on the node on
#		    which the command will be run
#    _SELECTED_ALL  - set to "true" if _DNAMES contains all the disks in the 
#		      volume group, to "false" otherwise
#    _IMPORT_PVID   - PVID to use to pick up volume group changes across the
#		      cluster
#
###############################################################################
function _verify_physical_volumes
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _verify_physical_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_verify_physical_volumes
	    set -x
	}
    }
    #
    :	Check for proper input
    #
    (( $# < 3 )) && {
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 55 "${_CMD}: wrong number of arguments\n" ${_CMD} 
	exit 1
    }
    #
    :	Check to see if any work is required here
    #
    if [[ -z $_DNAMES && $_CSPOC_MODE == "concurrent" && -z $I_NODES ]] && 
       [[ -z $CA_FLAG ]] 
    then
	#
	:	If the list of disks provided by the user is empty, and this is
	:	a concurrent request, and the volume group is varyed on all nodes,
	:	we do not need to go any further, since we will not have to do
	:	any importing.
	#
	return
    fi
    #
    :	For serial requests, or situations where a concurrent
    :	volume group is not online on all nodes, or where an explicit
    :	corrective action has been requested, an _IMPORT_PVID is still needed.
    #
    typeset _VG="$1"
    typeset _CHECK_VG="$2"
    typeset _CHECK_ALL="$3"
    typeset _NODE="$4"
    typeset _AVL_DISKS=""
    typeset _BAD_DISKS=""
    typeset _SVG
    typeset _CLNODE
    typeset PV
    typeset _D
    typeset _USE_REFNODE
    typeset _DISK=""
    typeset _disk_info=""
    #
    :	Check to see if the list of disks has to be resolved with respect to
    :	the reference node.  If no reference node is given, or if the
    :	reference node is the same as the node on which the command is going
    :	to be run, no such resolution is required.
    #
    if [[ -n $_REFNODE && $_REFNODE != $_NODE ]]
    then
	_USE_REFNODE="true"
    else
	_USE_REFNODE="false"
    fi
    if [[ $_CHECK_VG == "true" ]]
    then
	#
	:   Verify that the physical volumes belong to the volume group
	:   provided
	#
	_SVG=$_VG
    else
	#
	:   Verify that the physical volumes belong to no volume group
	#
	_SVG=None
    fi
    #
    :	If the given disk names in $_DNAMES have to be interpreted with respect
    :	to a reference node, find out what names are in use there.
    #
    if [[ $_USE_REFNODE == "true" ]]
    then
	if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
	then
	    print "DEBUG: Obtaining physical volumes from _REFNODE ($_REFNODE)"
	fi
	#
	:   Get the physical volume information from the reference node
	#
	E_LSPV_CMD=$(print "LC_ALL=C lspv -L" | clencodearg -e)
cel_f1
	(( $TRY_RC != 0 )) && exit 1
	#
	:   Parse the output of the lspv command to find the disk names and
	:   PVIDs on that node
	#
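	#
	#   Each line of $try_out is the node tag added by cdsh followed by
	#   the usual lspv columns, e.g. (hypothetical values):
	#	nodeA: hdisk2 00c1234567890abc datavg active
	#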
	while read _out_node _out_disk _out_pvid _out_vg _out_rest ; do
	    if [[ $_out_vg == $_SVG && $_out_pvid != [Nn]one ]]
	    then
		#
		:   Add $_out_disk to the list of disks in that volume group
		:   on that node
		#
		_AVL_DISKS=${_AVL_DISKS:+"${_AVL_DISKS} "}$_out_disk
	    fi
	    if [[ $_DNAMES == @(?(* )$_out_disk?( *)) ]]
	    then
		#
		:   Add $_out_disk to the list of PVIDs for the disks provided by 
		:   the user.  This will be used for getting the physical volume 
		:   names on the node where the command will actually be run, if 
		:   it is different from the reference node.
		#
		_PVID_LIST=${_PVID_LIST:+"${_PVID_LIST} "}$_out_pvid
	    fi
	done < $try_out
	#
	:   Save a pointer to the lspv output on this node so that we can get
	:   to it later if we have to.
	#
	_disk_info=$try_out
    fi	
    #
    :	Now, get the list of physical volumes from the node on which we will
    :	run the command.
    #
    if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
    then
	print "DEBUG: Obtaining physical volumes on node $_NODE"
    fi
    TRY_RC=0
    E_LSPV_CMD=$(print "LC_ALL=C lspv -L" | clencodearg -e)
cel_f2
    (( $TRY_RC != 0 )) && exit 1
    #
    :	If we did not have to use the reference node, then set the list of
    :	available volumes here, as well as the list of physical volume ids
    #
    if [[ $_USE_REFNODE == "false" ]]
    then
	#
	:   Parse the output of the lspv command to find the disk names and
	:   PVIDs on that node
	#
	while read _out_node _out_disk _out_pvid _out_vg _out_rest ; do
	    #
	    :	Create a list of the names of the disks in that volume group
	    :	on that node
	    #
	    if [[ $_out_vg == $_SVG && $_out_pvid != [Nn]one ]]
	    then
		_AVL_DISKS=${_AVL_DISKS:+"${_AVL_DISKS} "}$_out_disk
	    fi
	    #
	    :   Create a list of PVIDs for the disks provided by the user.  This
	    :   will be used for getting the physical volume names on the node
	    :   where the command will actually be run, if it is different from
	    :   the reference node.
	    #
	    if [[ $_DNAMES == @(?(* )$_out_disk?( *)) ]]
	    then
		_PVID_LIST=${_PVID_LIST:+"${_PVID_LIST} "}$_out_pvid
	    fi
	done < $try_out
	#
	:   If we are going to use the second set of lspv output, set the
	:   pointer to it, since the temp file will have a different name.
	#
	_disk_info="$try_out"
    fi
    #
    :	If we were not called from SMIT, verify that all the disks passed on
    :	the command line are valid.  SMIT only shows those disks that are
    :	valid, so the user cannot provide any that are bad.
    #
    if [[ $_CSPOC_CALLED_FROM_SMIT == "false" ]]
    then
	# 
	:   Collect the names of the given disks that do not show up on the
	:   target node
	#
	for PV in $_DNAMES
	do
	    [[ $_AVL_DISKS != @(?(* )$PV?( *)) ]] && \
		_BAD_DISKS=${_BAD_DISKS:+"${_BAD_DISKS} "}${PV}
	done
	[[ -n $_BAD_DISKS ]] && {
	    if [[ $_SVG == "None" ]] ; then
		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 20 "${_CMD}: Physical volumes ($_BAD_DISKS) are invalid on node $_REFNODE\n" ${_CMD} $_BAD_DISKS $_REFNODE 
	    else
		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 21 "${_CMD}: Physical volumes ($_BAD_DISKS) are not allocated to volume group $_VG\n" ${_CMD} $_BAD_DISKS $_VG 
	    fi
	    exit 1
	}
    fi
    # 
    :	Determine the physical volume id to use for importing changes on other
    :	nodes in the cluster.  We want a disk that will be in the volume group
    :	when the operation is complete, and, preferably, one that was in the
    :	volume group before the operation, too.
    # 
    if [[ $_CHECK_ALL == "true" ]]
    then
	#
	:   Check to see if the user listed all physical volumes in the volume
	:   group
	#
	if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
	then
	    print "DEBUG: Checking for all disks on command line"
	fi
	#
	:   Check to see if there is any disk in the volume group that was not
	:   in the given list of disks
	#
	for _D in $_AVL_DISKS
	do
	    [[ $_DNAMES != @(?(* )$_D?( *)) ]] && {
		_DISK="$_D"
		break
	    }
	done
	#
	:   The expected use of _CHECK_ALL == true is for operations like
	:   reducevg and unmirrorvg that remove disks from the volume group.
	:
	:    + if all disks have been selected, none can be used for
	:      importvg, and the _SELECTED_ALL flag will indicate this.
	:
	:    + if not all disks have been selected, _DISK will contain one that
	:      was not selected, and can be used for importvg once the
	:      reducevg is done
	#
	if [[ -z $_DISK ]]
	then
	    _SELECTED_ALL="true"
	else
	    _SELECTED_ALL="false"
            _IMPORT_PVID=$(grep -w $_DISK $_disk_info | read node disk pvid rest ; print $pvid)
	fi
    else
        #
        :   It was not expected that all disks in the volume group could be
        :   listed - this is not the reducevg case.  Pick an existing disk in
	:   the volume group.  
        #
	_IMPORT_PVID=$(grep -w $_VG $_disk_info | grep -v "[Nn]one" | read node disk pvid rest ; print $pvid)
	if [[ -z $_IMPORT_PVID && -n $_DNAMES ]]
	then
	    #
	    :	If we had not found any disks in the volume group - which could
	    :	happen on importing a new volume group - pick one of the given
	    :	disks.
	    #
            print $_DNAMES | read _DISK rest
            _IMPORT_PVID=$(grep -w $_DISK $_disk_info | \
                           read node disk pvid rest
                           if [[ $pvid == +([[:xdigit:]]) ]]
                           then
                               print $pvid
                           fi)
        fi
    fi
    #
    :	If no disks were provided, we do not need to go any further
    #
    [[ -z $_DNAMES ]] && 
	return
    rm -f $_disk_info
    #
    :	If we have a PVID_LIST and a reference node, we need to translate
    :	the physical ids into physical names on the node where the command
    :	will actually be run.
    #
    if [[ -n $_PVID_LIST && $_USE_REFNODE == "true" ]] 
    then
	[[ -n $_DEBUG ]] && print "DEBUG: Translating PVID_LIST"
	_EDNAMES=""
        for _P in $_PVID_LIST
        do
	    _DISK=$(grep -w $_P $try_out | read node disk rest ; print $disk)
	    if [[ -n $_DISK ]]
	    then
		#
		:   Here build the encoded list of disk names for the command
		#
		_EDNAMES=${_EDNAMES:+"${_EDNAMES} "}$(print -- $_DISK | clencodearg)
	    fi
        done
    fi
}
###############################################################################
#
# _verify_replicated_volumes
#
# Verifies that the volume group being operated on is in a Replicated Resource Group.
#
# Arguments:
#
#    _VG            -  the volume group
#    _CLNODES       -  The nodes where the volume groups would be imported
#    _ACTIVE_NODE   -  the reference node where the VG is varied on
#
###############################################################################
function _verify_replicated_volumes
{
    if [[ -n "$_DEBUG" ]] 
    then
	print "DEBUG: Entering _verify_replicated_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_verify_replicated_volumes
	    set -x
	}
    fi
    (( $# != 3 )) && {
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 55 "${_CMD}: wrong number of arguments\n" ${_CMD} 
	exit 1
    }
    typeset _VG="$1"
    typeset _ACTIVE_NODE="$2"
    typeset _CLNODES="$3"
    typeset _REP_VOL
    typeset _PPRC_REPRESOURCE
    typeset _ERCMF_REPRESOURCE
    typeset _SVCPPRC_REPRESOURCE
    typeset _SR_REPRESOURCE
    #
    :	Verify that this Volume Group contains Replicated Volumes.
    :	This is to verify that the changes made at one site will be propagated
    :	to the remote DASD. CSPOC operations will not be allowed if the changes 
    :	will not be known at the remote site.
    #
    # Replicated Volume Types:
    #       IBM PPRC 
    #       IBM GeoMirror
    #       IBM eRCMF
    #       IBM SVC PPRC
    #       EMC SRDF®
    #
    # IBM PPRC  Replicated Volumes 
    # 
    export ODMDIR=/etc/objrepos
    _REP_VOL=$(/usr/es/sbin/cluster/utilities/clodmget -q value="$_VG" -f group -n HACMPresource)
    if [[ -z $_REP_VOL ]]
    then
	#
	:   A volume group that is not in a resource group is not a replicated resource
	#
	return
    fi
    _PPRC_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f PPRC_REP_RESOURCE -n HACMPresource)
    _ERCMF_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f ERCMF_REP_RESOURCE -n HACMPresource)
    _SVCPPRC_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f SVCPPRC_REP_RESOURCE -n HACMPresource)
    _SR_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f SR_REP_RESOURCE -n HACMPresource)
    if [[ -z "$_PPRC_REPRESOURCE" && -z "$_ERCMF_REPRESOURCE" && -z "$_SVCPPRC_REPRESOURCE" && -z "$_SR_REPRESOURCE" ]]
    then
       #
       : This VG is not a PPRC Replicated Resource of any of the supported types
       #
       return 0
    fi
    #
    :	Verify that the cluster is active on the node with 
    :	the Volume Group varied on $_ACTIVE_NODE.
    #
    E_ACTIVE_NODE=$(print $_ACTIVE_NODE | clencodearg)
cel_f3
    if (( $cel_rc >= 1 )) 
    then
	#
	:   Cluster is active on node with $_VG varied on. Allow CSPOC operations.
	:   Lazy update will enable the changes to be made at the remote
	:   site after failover
	#
	return 0
    fi
    #
    :	The cluster is not active on the node with $_VG varied on.   Further
    :	processing depends on the resource type
    #
    if [[ -n "$_PPRC_REPRESOURCE" ]]
    then
	#
	: This is a PPRC Replicated Resource
	: Verify that the cluster is active on the node with 
	: the Volume Group varied on $_ACTIVE_NODE
	#
	res_type="PPRC"
	verify_cmd=/usr/es/sbin/cluster/pprc/utils/cl_verify_pprc_cspoc
    fi   
    if [[ -n "$_SVCPPRC_REPRESOURCE" ]]
    then
	#
	:   This is an SVC PPRC Replicated Resource
	:   Verify that the cluster is active on the node with 
	:   the Volume Group varied on $_ACTIVE_NODE
	#
	res_type="SVC PPRC"
	verify_cmd=/usr/es/sbin/cluster/svcpprc/utils/cl_verify_svcpprc_cspoc
    fi   
    if [[ -n "$_SR_REPRESOURCE" ]]
    then
	#
	:   This is an EMC SRDF® Replicated Resource
	#
	res_type="EMC SRDF®"
	verify_cmd=/usr/es/sbin/cluster/sr/utils/cl_verify_sr_cspoc
    fi   
    if [[ -n $res_type ]]
    then
	#
	:   The Cluster is not active on the node with vg varied on. If the pprc
	:   pair is not in a full-duplex state, changes made on this node may not
	:   be known at the remote ODM. Verify that the CSPOC operations will be 
	:   run on nodes that are on the same site. CSPOC operations should 
	:   succeed in this case.
	#
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 60 "WARNING: $_VG is a $res_type Replicated Resource.\n"  $_VG "$res_type"
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 61 "	 Since the cluster is NOT active on node $_ACTIVE_NODE with $_VG active,\n" $_ACTIVE_NODE $_VG
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 62 "	 the CSPOC operation may not succeed on the remote peers.\n"
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 63 "Verifying $res_type pair state ...\n" "$res_type"
	#
	:   Convert the comma separated list of nodes in _CLNODES 
	:   into space separated list SP_CLNODES
	#
	SP_CLNODES=$(IFS=, set -- $_CLNODES ; print $* )
	if ! $verify_cmd $_VG $_ACTIVE_NODE $SP_CLNODES
	then
	    nls_msg -2 -l $cspoc_tmp_log ${_MSET} 9999 "The state of the $res_type pair does not allow the CSPOC operation at this time.\n" $res_type
	    exit 1
	else
	    return 0
	fi 
    fi
}
###############################################################################
#
#
#   Name:	_lv_status
#
#
#   Input:	1. flag for clgetvg - either "-l" or "-f"
#		2. corresponding value, either logical volume or file system,
#		   in encoded form
#
#		Variables used by this function
#
#		_SPOC_FORCE - force flag set
#		_TARGET_NODES - list of nodes from command line on which this
#		    operation is to be performed
#
#
#   Function:	Call clgetvg on each node on which the logical volume
#		operation is going to be performed, until one reports back
#		the name of the owning volume group.  That gets passed
#		through to _vg_status, which indicates the state of the
#		volume group on each node.
#
#		Note that it is assumed that the logical volume is known on at
#		least one of the nodes in _TARGET_NODES; it is an error for
#		this routine to be invoked with a completely unknown logical
#		volume.
#
#
#   Output:	Variables set by this function
#
#		VG  - encoded name of the owning volume group
#		DVG - decoded (readable) name of the owning volume group
#
#		Note that these have to have been defined by the caller in
#		order for the caller to pick up these values.
#
#
#   Returns:    Normally returns to caller with output variables set
#               On error, will exit with a message; does not return to caller
#
#
################################################################################
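#
#   Example usage (hypothetical logical volume name):
#
#	_lv_status -l $(print datalv | clencodearg)
#
#   which sets DVG (e.g. datavg) and VG to its encoded form, then calls
#   _vg_status to map out the volume group state across the cluster.
#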
function _lv_status
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _lv_status for $1 $(print $2 | cldecodearg) version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_lv_status
	    set -x
	}
    }
    integer TRY_RC=1
    option=$1			    # either '-l' or '-f'
    parameter=$2		    # either logical volume or file system name
    #
    :	Since it is only necessary to find the owning volume group once - LV
    :	names assumed to be unique across the cluster - check to see if the
    :	local node is one of the ones that should know about it.  Local tests
    :	are faster
    #
    LOCAL_NODE=$(get_local_nodename)
    if [[ $_TARGET_NODES == @(?(*,)$LOCAL_NODE?(,*)) ]] 
    then
	uu_parm=$(print $parameter | cldecodearg)
	DVG=$(clgetvg $option $uu_parm 2>/dev/null)	# suppress any 'not found' msg
	TRY_RC=$?
    fi
    #
    :	If not successfully found locally, look across the rest of the cluster
    #
    if (( $TRY_RC != 0 )) || [[ -z $DVG ]]
    then
	#
	:   Find which VG contains the LV, asking each of the nodes in turn, if
	:   necessary
	#
cel_f4
	read A DVG < $try_out		    # decoded (readable) volume group name
	rm -f $try_out			    # otherwise next call just appends
    fi
    (( $TRY_RC != 0 )) &&		    # No node knows of this logical volume
	exit 1
    VG="$(print $DVG | clencodearg)"	    # encoded volume group name
    #
    :       Determine the activation status of the volume group across the
    :       cluster.  This tells us where to run the command.
    #
    _vg_status
}					    # end _lv_status
###############################################################################
#
#
#   Name:	_vg_status
#
#
#   Input:	Variables used by this function
#
#		VG - volume group name, encoded
#		DVG - volume group name, decoded
#		_SPOC_FORCE - force flag set
#		_TARGET_NODES - list of nodes from command line on which this
#		    operation is to be performed
#
#
#   Function:	Call clresactive on each node on which the volume group
#		operation is going to be performed.  This will pass back
#		status from lsvg.  Provide in CL_NODE a choice for the node to
#		run a command against this volume group.
#
#
#               "-u"    A volume group not known on any node is not an error
#
#
#   Output:	Variables set by this function
#
#		CL_NODE - node on which to run operation against this volume
#			  group
#		_CSPOC_MODE - if set to "evaluate", and the volume group mode
#			  can be determined from the current activation
#			  state or definitions, set to "concurrent" or "shared"
#               VG_ACTIVE - flag indicating type of activation done on CL_NODE
#                           "S" - was already active 
#                           "I" - was originally inactive
#                           "P" - was originally in passive mode
#                           "C" - was selected from the concurrent list
#
#		The following space separated lists:
#
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode (varyonvg -c)
#			  If VG_ACTIVE == C, then CL_NODE is also in this list
#		S_NODES - nodes on which the volume group is vary'd on in serial
#			  mode (varyonvg)
#			  If VG_ACTIVE == S, then CL_NODE is also in this list
#		I_NODES - nodes on which the volume group is inactive (varyoffvg)
#			  If VG_ACTIVE == I, then CL_NODE was removed from this
#			  list
#		P_NODES - nodes on which the volume group is vary'd on in passive
#			  mode (varyonvg -c -P)
#			  If VG_ACTIVE == P, then CL_NODE was removed from this
#			  list
#		O_NODES - nodes on which the volume group is unknown
#		G_NODES - nodes on which the volume group is known
#
#
#   Returns:    Normally returns to caller with output variables set
#               On error, will exit with a message; does not return to caller
#
#
################################################################################
function _vg_status
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_status version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_vg_status
	    set -x
	}
    }
    #
    :   Pick up any passed options
    #
    u_flag=""
    while getopts ":u" option ; do
        case $option in
            u )
                u_flag="true"
                ;;
            * )
		shift $((OPTIND - 1))
                dspmsg scripts.cat 6555 "Option \"$option\" is not valid\n" "$option"
                return 1
                ;;
        esac
    done
    # 
    :	Check all the nodes relevant to this operation, to see what the current
    :	state of the volume group is on those nodes.
    # 
    integer TRY_RC=0
cel_f5
    (( $TRY_RC != 0 )) && 
	exit 1
    #
    :	Collect that state into local variables from the file $try_out, where
    :	it was collected from running clresactive on each node.  The format of
    :	the file is:
    :	"node_name: <status>"
    #
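    #
    #	e.g. (hypothetical lines): "nodeA: concurrent", "nodeB: passive"
    #	and "nodeC: no" would land nodeA in C_NODES, nodeB in P_NODES and
    #	nodeC in O_NODES.
    #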
    C_NODES=""                          # clean out leftover values
    S_NODES=""
    I_NODES=""
    P_NODES=""
    O_NODES=""
    G_NODES=""
    while read node status rest ; do	# parse the line of $try_out
	case $status in			# note the status
	    concurrent )
		    type=C
		;;
	    active )
		    type=S
		;;
	    inactive )
		    type=I
		;;
	    passive )
		    type=P
		;;
	    no | * )
		    type=O
		;;
	esac
	#
	:   Add the node name minus the trailing ':' to the appropriate
	:   list:
	:	C_NODES - varyed on in concurrent mode - varyonvg -c
	:		  note that active mode 'varyonvg -c -A' also shows up
	:		  as 'concurrent'
	:	S_NODES - varyed on in normal mode - varyonvg
	:	I_NODES - not varyed on at all - varyoffvg
	:	P_NODES - varyed on in passive mode - varyonvg -c -P
	:	O_NODES - not known on that node - exportvg
	#
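	#
	#   e.g., for type=C and node="nodeA:", the eval runs
	#	C_NODES=${C_NODES:+$C_NODES" "}nodeA
	#   appending the node to the appropriate list.
	#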
	eval ${type}_NODES=\${${type}_NODES:+\$${type}_NODES" "}${node%:}
	if [[ $type != O ]]		# status is not 'unknown'
	then
	    #
	    :	Additionally, keep a list of nodes on which the volume group
	    :	is at least defined, independent of its current state.
	    #
	    G_NODES=${G_NODES:+$G_NODES" "}${node%:}
	fi
    done < $try_out			# line at a time into the read statement
    rm -f $try_out				# otherwise next call just appends
    [[ -n $_DEBUG ]] && (( $_DEBUG >  4 )) && {
        print "DEBUG: Status of the volume group $DVG across nodes $_TARGET_NODES"
        print "DEBUG:   Concurrent = $C_NODES"
        print "DEBUG:   Active = $S_NODES"
        print "DEBUG:   Inactive = $I_NODES"
        print "DEBUG:   Passive = $P_NODES"
        print "DEBUG:   volume group is unknown = $O_NODES"
        print "DEBUG:   volume group is known = $G_NODES"
    }
    #
    :	Some C-SPOC commands work on both concurrent and shared volume groups.
    :	The intent is either flagged through SMIT, or must be determined
    :	dynamically.  If a dynamic determination has not yet been made, see if
    :	we can do so now, based on the known activation status.
    #
    if [[ -z $_CSPOC_MODE || $_CSPOC_MODE == "evaluate" ]] 
    then
	if [[ -n $C_NODES ]]
	then
	    #
	    :   Varyed on in concurrent mode
	    #
	    _CSPOC_MODE="concurrent"
	elif (( 1 < $(print $S_NODES | wc -w) ))
	then
	    #
	    :	Implicitly on in RAID concurrent mode on more than one node
	    #
	    _CSPOC_MODE="concurrent"
	elif [[ -n $S_NODES ]]
	then
	    #
	    :	Ordinary vary on at most one node
	    #
	    _CSPOC_MODE="shared"
	elif [[ -n $P_NODES ]]
	then
	    #
	    :	Passive vary on implies a shared resource
	    #
	    _CSPOC_MODE="shared"
	#
	:   We could not determine the mode from the activation state.  This
	:   would be the case when the volume group was varyed off cluster
	:   wide.  So, check the local ODM to see how it is used.  The
	:   correctness of this operation depends on there being no
	:   unsynchronized changes across the cluster.
	#
	elif [[ -n $(odmget "-q name = CONCURRENT_VOLUME_GROUP and value = $DVG" HACMPresource) ]]
	then
	    #
	    :	Used as a concurrent volume group
	    #
	    _CSPOC_MODE="concurrent"
	elif [[ -n $(odmget "-q name = VOLUME_GROUP and value = $DVG" HACMPresource) ]]
	then
	    #
	    :	Used as a shared volume group
	    #
	    _CSPOC_MODE="shared"
	else
	    #
	    :   The volume group is not varied on anywhere, and not in a
	    :   resource group.  Assume shared, since that will work once
	    :   the volume group is varied on.
	    #
	    _CSPOC_MODE="shared"
	fi
    fi					    # end set _CSPOC_MODE
    #
    :	Correction for fast disk takeover
    #
    if [[ $_CSPOC_MODE == "concurrent" ]] && 
       (( 1 == $(print $C_NODES | wc -w) )) &&
       [[ -n $(odmget "-q name = VOLUME_GROUP and value = $DVG" HACMPresource) ]]
    then
	#
	:   An enhanced concurrent volume group used in active/passive
	:   mode for fast disk takeover will show up as being in
	:   concurrent mode on at most one node, but will be listed as
	:   a shared VOLUME_GROUP in HACMPresources
	#
	_CSPOC_MODE="shared"
    fi
    #
    :	Having found the status of the volume group across the cluster, pick
    :	a node that would be most appropriate to run the LVM or file system
    :	command of interest on.  Preferentially pick the local node if
    :	possible, otherwise just pick the first available.
    :
    :	At the end of this processing:
    :	    CL_NODE has the name of the node to use
    :	    VG_ACTIVE has an indication of the volume group current state
    #
    LOCAL_NODE=$(get_local_nodename)	    #	find out the local node name
    CL_NODE=""                              #   clean out any left over value
    if [[ -n $C_NODES ]] ; then		    #	in concurrent mode on some nodes
	#
	:   If the volume group is already varyed on in concurrent mode, pick
	:   a node from that list - preferentially, the local node - on which
	:   to run the command.
	#
	if [[ $C_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
	    CL_NODE=$LOCAL_NODE		    #	Use the local node
	else				    #	Local node is not a possibility 
	    echo $C_NODES | read CL_NODE rest #	So just use the first
	fi
	VG_ACTIVE="C"			    #	Picked from concurrent list
    elif [[ -n $S_NODES ]] ; then	    #	In shared mode on some nodes
	#
	:   The volume group can be active - ordinary varyonvg - on one or more
	:   nodes.  One node is the shared volume group case, multiple nodes
	:   would be expected in RAID concurrent mode.  Pick a node from that
	:   list - preferentially, the local node - on which to run the
	:   command.
	:   
	:   Note- it is up to the caller to decide if the operation should be
	:   allowed to proceed if the volume group is used in RAID concurrent
	:   mode on more than one node.  List operations will work; change,
	:   delete or create will not.
	#
	if [[ $S_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
	    CL_NODE=$LOCAL_NODE		    #	Use the local node
	else				    #	Local node is not a possibility 
	    echo $S_NODES | read CL_NODE rest #	So just use the first
	fi
	VG_ACTIVE="S"			    #	Used an active node
    else				    #	Not active anywhere
	#
	:   Since the volume group is currently varyed off, pick a node from
	:   the 'passive' or 'inactive' lists to vary it on.  Preferentially
	:   pick the local node.  The selected node is removed from the list,
	:   so that they remain accurate.
	#
	if [[ -n $P_NODES ]] ; then	    #	Look for passive nodes
	    if [[ $P_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
		CL_NODE=$LOCAL_NODE	    #	Use the local node
		P_NODES=$(echo $P_NODES | tr ' ' '\n' | grep -vw $LOCAL_NODE)
	    else			    #	Local node is not a possibility
		echo $P_NODES | read CL_NODE P_NODES  #	So just use the first
	    fi
	    VG_ACTIVE="P"		    #	Picked from passive list
	elif [[ -n $I_NODES ]] ; then	    #	Look for inactive nodes
	    if [[ $I_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
		CL_NODE=$LOCAL_NODE	    #	Use the local node
		I_NODES=$(echo $I_NODES | tr ' ' '\n' | grep -vw $LOCAL_NODE)
	    else			    #	Local node is not a possibility
		echo $I_NODES | read CL_NODE I_NODES  #	So just use the first
	    fi
	    VG_ACTIVE="I"		    #	Picked from inactive list
	else
            #
            :   For some operations, like importvg, it is valid to have a volume
            :   group that is not currently known on any node.  In this case,
            :   the reference node must be valid.
            #
            if [[ $u_flag == "true" ]] ; then    #   unknown volume groups allowed
                VG_ACTIVE="O"                   #   And this is one of them 
                CL_NODE=$_REFNODE               #   the disks should be known here
            else
		#
		:   If for some reason it was not possible to find a node on which
		:   the volume group is or could be brought on line, the operation
		:   stops here.
		#
		nls_msg -2 -l $cspoc_tmp_log 24 6 "no node has access to $DVG\n" $DVG 
		exit 1
	    fi
	fi
    fi					    #	end by volume group state
}					    #	end _vg_status
################################################################################
#
#
#   Name:	_vg_active
#
#   Input:	Variables used by this function
#
#		VG - volume group name, encoded
#		DVG - volume group name, decoded
#		_SPOC_FORCE - force flag set
#		S_NODES - nodes on which the volume group is vary'd on in serial
#			  mode (varyonvg)
#		I_NODES - nodes on which the volume group is inactive (varyoffvg)
#		P_NODES - nodes on which the volume group is vary'd on in passive
#			  mode (varyonvg -c -P)
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode
#		CL_NODE - node on which the volume group is active or to be
#			  activated
#		VG_ACTIVE - flag indicating type of activation done on CL_NODE
#			    "S" - was already active 
#			    "I" - was originally inactive
#			    "P" - was originally in passive mode
#			    "C" - was selected from the concurrent list
#		_DNAMES - list of physical disk names provided by
#			  _get_physical_volumes for those commands that use them
#
#		"-r"	- Volume group will be used for read/only operations
#			  only (e.g., display) and an _IMPORT_PVID is
#			  unnecessary
#		"-p"	- volume group need only be in passive mode for desired
#			  use
#               "-R"    - Returns 1 to the caller on failure to activate vg
#                         rather than exiting.  In such case CL_NODE is added to
#                         I_NODES and CL_NODE is emptied.
#
#
#   Function:	Ensure that the volume group is active on one of the nodes
#		with the intent of being able to run a command there; 
#		activate it if it is not currently active and force was
#		specified.  The volume group is activated on CL_NODE, as set
#		by _vg_status.
#
#
#   Output:	Variables set by this function
#
#		EPVID - encoded PVID which can be used for importvg
#
#
#   Returns:	Normally returns to caller with output variables set
#		On error, will exit with a message; does not return to caller
#
#
################################################################################
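#
#   Example usage (a sketch; flags per the description above):
#
#	_vg_active -r	    # bring the volume group on line on CL_NODE,
#			    # if need be, for a read-only operation
#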
function _vg_active
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_active version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    set -x
	    typeset PROGNAME=_vg_active
	}
    }
    #
    :   Pick up any passed options
    #
    r_flag=""
    passive_only_flag=""
    typeset R_flag=""
    while getopts ":rRp" option ; do
        case $option in
            r )
                r_flag="true"
                ;;
            R )
                R_flag="true"
                ;;
	    p )
		passive_only_flag="true"
		;;
            * )
                shift $((OPTIND - 1))
                dspmsg scripts.cat 6555 "Option \"$option\" is not valid\n" "$option"
                return 1
                ;;
        esac
    done
    if [[ -z $CL_NODE ]] ; then
	#
	:   If for some reason it was not possible to find a node on which the
	:   volume group is or could be brought on line, the operation stops
	:   here.  CL_NODE should have been set by _vg_status
	#
	nls_msg -2 -l $cspoc_tmp_log 24 6 "no node has access to $DVG\n" $DVG 
        [[ -z $R_flag ]] && exit 1
	return 1
    fi
    if [[ $VG_ACTIVE == @(I|P) && -z $passive_only_flag ]] ; then
	#
	:   If the volume group needs to be brought on line, do so on the
	:   selected node.  clvaryonvg will do the appropriate kind of varyon.
	#
	TRY_RC=0
cel_f6
        (( $TRY_RC != 0 )) && {
            [[ -z $R_flag ]] && exit 1
            I_NODES="$CL_NODE $I_NODES"
            CL_NODE=""
            return 1
        }                                   #   varyon failed
    elif [[ $VG_ACTIVE == I && $passive_only_flag == "true" ]]
    then
	#
	:   If the volume group needs to be brought online in passive mode
	:   only, invoke cl_pvo to do so
	#
	TRY_RC=0
cel_f7
        (( $TRY_RC != 0 )) && {
            [[ -z $R_flag ]] && exit 1
            I_NODES="$CL_NODE $I_NODES"
            CL_NODE=""
            return 1
	}
    fi					    #	end varyon
    [[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && 
	print "DEBUG: CL_NODE = $CL_NODE"
    #
    :	If there are inactive nodes whose information will have to be updated,
    :	get a useful PVID.
    #
    if [[ -z $EPVID && -z $r_flag && -z $passive_only_flag && ( -z $_REMOVED_VG || $_REMOVED_VG == "true" ) ]] 
    then
	#
	:   Check the disks, and find a PVID we can use for importvg later, if
	:   it has not been done in a prior check.
	#
	if [[ -z $_IMPORT_PVID ]]
	then
	    _verify_physical_volumes $DVG false false $CL_NODE
	    [[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && {
		print "DEBUG: _IMPORT_PVID = $_IMPORT_PVID"
	    }
	fi
	EPVID=$(echo $_IMPORT_PVID | clencodearg)
    fi
}					    #	end _vg_active
##############################################################################
#
#
#   Name:       _vg_sync
#
#   Input:	function request
#		    sync - synchronize
#		    release - restore volume group to original state
#		If no function request is passed, both functions are performed
#
#		chvg command
#		    lists a chvg command to be run on all nodes which must do
#		    an importvg -L, for those functions not picked up by
#		    importvg -L
#		
#		    This is valid only if the 'sync' function request is
#		    specified
#
#               Variables used by this function
#
#               VG - volume group name, encoded
#               DVG - volume group name, decoded
#               CL_NODE - node on which the volume group is active
#               I_NODES - nodes on which the volume group is inactive
#			  (varyoffvg)
#               P_NODES - nodes on which the volume group is vary'd on in
#			  passive mode (varyonvg -c -P)
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode
#               VG_ACTIVE - flag indicating type of activation done on CL_NODE
#                           "S" - was already active 
#                           "I" - was originally inactive
#                           "P" - was originally in passive mode
#			    "C" - was in concurrent mode
#		EPVID - encoded PVID of a disk in the volume group to use for 
#			importvg -L
#		_IMPORT_PVID - decoded PVID of a disk in the volume group to
#			use for importvg -L
#		_REMOVED_VG_ - operation resulted in the deletion of the
#			       volume group
#
#
#   Function:   Synchronize the updated volume group information across the 
#               cluster.   This ensures that the ODM information on each node
#               on which the volume group is defined actually matches what's
#               out on the disks, as modified by the operation.
#
#
#   Output:     None
#
#
#   Returns:    Normally returns to caller 
#               On error, will exit with a message; does not return to caller
#
#
##############################################################################
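#
#   Example usage (hypothetical volume group and chvg command):
#
#	_vg_sync sync "chvg -a y datavg"
#
#   which would run clupdatevg - the "importvg -L" - followed by that chvg
#   command on each node in I_NODES, leaving the restore of the original
#   volume group state to a later "release" call.
#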
function _vg_sync
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_sync version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_vg_sync
	    set -x
	}
    }
    request=$1			    #	function requested by caller
    chvg_cmd=$2			    #	Any chvg command that has to be run
    integer TRY_RC=0		    #	return code from CSPOC operations
    integer SAVE_RC=0		    #	error did not stop operations
    _REMOVED_VG_=${_REMOVED_VG_:="false"}   #	operation deleted volume group
    if [[ -z $request || $request == "sync" ]] &&
       [[ $_REMOVED_VG_ == "false" ]] ; then
	if [[ -n $I_NODES && -n $EPVID ]] ; then
	    #
	    :   There are nodes on which volume group $DVG was inactive, and for
	    :   which the local ODM must be updated to match the volume group.  This
	    :   processing is skipped for nodes which have the volume group varyed on
	    :   in passive mode, since LVM does the updates automatically.
	    #
	    if [[ $VG_ACTIVE == S ]] ; then
		#
		:   The volume group $DVG was brought on line in shared mode -
		:   ordinary varyonvg - remove the reserve so that the other
		:   nodes can read the VGDA and VGSA information from the disks.
		#
cel_f8
		(( $TRY_RC != 0 )) && 
		    exit 1
            elif [[ $VG_ACTIVE == I ]] ; then
		# 
		:   The volume group $DVG was originally - before start of this
		:   C-SPOC plan - inactive, so vary it off again.  This will remove
		:   the reserve so that the other nodes can read the VGDA and
		:   VGSA information from the disks.
		# 
cel_f9
                (( $TRY_RC != 0 )) &&
                    exit 1
                #
                :   Get the time stamps in sync
                #
                cl_update_vg_odm_ts -o $DVG "$G_NODES"
                                    VG_ACTIVE=""
	    fi
	    #
	    :	Have each of the inactive nodes run "importvg -L" followed by
	    :	any needed chvg command to update the local ODM, and update the
	    :	local HACMP timestamps for this volume group.
	    #
	    update_cmd="clupdatevg $DVG $_IMPORT_PVID"
	    #---------------------------------------------------
            :   - if remote script debugging is desired - VERBOSE_LOGGING_REMOTE=high
            :   - set up request so the output of the script at the remote node is
            :     saved in /var/hacmp/log/cspoc.log.remote
            #---------------------------------------------------
            if [[ $VERBOSE_LOGGING_REMOTE == "high" ]]
            then
                update_debug_env="VERBOSE_LOGGING=high"
                update_debug="2>&1 | tee >> /var/hacmp/log/cspoc.log.remote"
                update_cmd="$update_debug_env $update_cmd $update_debug"
            fi
	    if [[ -n $chvg_cmd ]] ; then
		update_cmd="$update_cmd && $chvg_cmd"
	    fi
	    e_update_cmd=$(echo $update_cmd | clencodearg -e)
	    NODE_LIST=$(IFS=, set -- $I_NODES ; print "$*")
cel_f10
	    #
	    :	Even if some nodes failed on the "importvg -L", we still need to
	    :	clean up below, so save the error for later
	    #
	    (( $TRY_RC != 0 )) && 
		SAVE_RC=1
	fi
    fi
    if [[ -z $request || $request == "release" ]] &&
       [[ $_REMOVED_VG_ == "false" ]] ; then
	#
	:   Now that all nodes have updated state, put volume group $DVG back
	:   into the state we found it in - the state was set by _vg_active -
	:   assuming, of course, that it was not entirely removed in the
	:   operation
	#
	TRY_RC=0
	case $VG_ACTIVE in
	I)  #
	    :	The volume group $DVG was originally inactive.  If it was 
	    :	varyed off up above in the synchronization path, nothing needs
	    :	be done here
	    #
	    if [[ $request == release ]]
	    then
		#
		:   The volume group $DVG was originally inactive.  So, vary it off
		#
cel_f11
		(( $TRY_RC != 0 )) && 
		    exit 1
                #
                :   Get the time stamps in sync
                #
                cl_update_vg_odm_ts -o $DVG "$G_NODES"
		if [[ -n $P_NODES || -n $C_NODES || -n $S_NODES ]]
		then
		    #
		    :   On a successful varyoff, set the fence height to allow read only
		    :   access if there are any other nodes that are using this volume 
		    :	group.  This should preserve the volume group from inadvertent
		    :   modification by this node.
		    #
cel_f12
		fi
	    fi
	    ;;
	P)  #	
	    :	The volume group $DVG was originally varyed on in passive mode.  So,
	    :	return it to that mode on $CL_NODE
	    #
	    if [[ $passive_only_flag != "true" ]]
	    then
cel_f13
		(( $TRY_RC != 0 )) && 
		    exit 1
		#
		:   On a successful varyoff, set the fence height to allow read only
		:   access.  This should preserve the volume group from inadvertent
		:   modification by this node.
		#
cel_f14
	    fi
	    #
	    :   On a successful varyoff, set the fence height to allow read only
	    :   access.  This should preserve the volume group from inadvertent
	    :   modification by this node.
	    #
cel_f15
	    ;;
	S)  #
	    :	The volume group $DVG was originally active.  If we removed the reserves
	    :	up above, do another varyon to put them back on node $CL_NODE
	    #
	    if [[ -n $CL_NODE && -n $I_NODES ]]
	    then
cel_f16
		(( $TRY_RC != 0 )) && 
		    exit 1
	    fi
	    ;;
	C)  #
	    :	The volume group $DVG was in concurrent mode, nothing needs be
	    :	done
	    #
	    ;;
        O)  #
            :   The volume group $DVG was originally unknown and subsequently
            :   imported, nothing actually has to be done here
            #
            ;;
	esac
    fi
    if [[ $_REMOVED_VG_ == "true" && -n $I_NODES ]] ; then
	#
	:   The LVM operation resulted in the complete removal of the
	:   volume group $DVG, export it on the inactive nodes to get rid of their
	:   definitions, too.
	#
	NODE_LIST=$(IFS=, set -- $I_NODES ; print "$*")
cel_f17
	(( $TRY_RC != 0 )) && 
	    SAVE_RC=1
    fi
    return $SAVE_RC		#   Pass back any saved return code
}				#   End _vg_sync
# Include utilities for multi node disk hb
# @(#)99      1.14 src/43haes/usr/sbin/cluster/cspoc/plans/mndhb_utils.cel, hacmp.cspoc, 61haes_r714 3/20/08 16:52:56
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/cspoc/plans/mndhb_utils.cel 1.14 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 2007,2008 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# _MSET is expected to be set by modules including this. 
# If it is not set, then set it.
if [[ "x$_MSET" == "x" ]]
then
    _MSET=126
fi
###############################################################################
#   FUNCTION: _validate_lv_for_mndhb
#
#   Validates that a logical volume meets the criteria for use with mndhb.
#   Said lv should be created with:
#       -c 1            only a single copy; e.g., no LVM mirroring
#       -e m            use the minimum number of disks
#       -L MNDHB        label text, for easy identification
#       -t jfs          not really a good fit on type, but should be workable
#       -u 1            confine to a single disk
#       -v n            no write verify
#       -w n            no mirror write consistency
#       The number of physical partitions should be based on TopSvcs'
#       statement of the space required.
#       We'll also probably pass the "-G", "-U" and "-P" flags, to set
#       the owner, group and permissions for the logical volume special file.
#       I don't think that the "-r" to prohibit relocation
#       on reorganization is strictly required, but we may do it anyway.
#       The "-y" parameter to provide a logical volume name is not strictly
#       required, but may be useful for easy identification in testing.
#
#   Arguments: candidate logical volume name
#
#   Returns: 1 if the lv meets criteria
#            0 otherwise
#
##############################################################################
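#
#   A sketch of the sort of mklv invocation those criteria imply
#   (hypothetical logical volume, volume group and size):
#
#	mklv -c 1 -e m -L MNDHB -t jfs -u 1 -v n -w n -y mndhb_lv01 vg01 1
#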
function _validate_lv_for_mndhb
{
    typeset PROGNAME=_validate_lv_for_mndhb
    [[ -n $_DEBUG ]] && {
        print "DEBUG: Entering $PROGNAME version 1.5 for $*"
        (( $_DEBUG >= 8 )) && {
            set -x
        }
    }
    #
    :   Pick up the candidate name
    #
    candidate_lv=$1
    vg_name=$2
echo "Validate logical volume [$candidate_lv] for use with mndhb"
echo "[not yet implemented]"
    return 1;
}
###############################################################################
#   FUNCTION: _validate_rg_for_mndhb
#
#   Validates that a resource group meets the criteria for use with mndhb.
#   Said group should:
#       Include all cluster nodes as participating nodes
#       Have appropriate startup, fallover and fallback policies for a
#               concurrent resource group
#       Include at least one concurrent vg resource
#
#   Arguments: optional -v flag, meaning flag only explicit mismatches
#              candidate resource group name
#	       optional volume group name
#
#   Returns: 0 if the group meets criteria
#            1 otherwise
#
##############################################################################
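#
#   Example usage (hypothetical names): "_validate_rg_for_mndhb -v rg1 vg1"
#   accepts a missing resource or volume group, while
#   "_validate_rg_for_mndhb rg1" requires rg1 to exist and to contain at
#   least one concurrent mode volume group.
#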
function _validate_rg_for_mndhb
{
    typeset PROGNAME=_validate_rg_for_mndhb
    [[ -n $_DEBUG ]] && {
        print "DEBUG: Entering $PROGNAME version 1.5 for $*"
        (( $_DEBUG >= 8 )) && {
            set -x
        }
    }
    #
    :	Check for '-v', which means a less strict test: a resource group or
    :	volume group that is not present is acceptable
    #
    V_FLAG=false
    if [[ $1 = '-v' ]]; then
	V_FLAG=true
	shift
    fi
    #
    :   Pick up the candidate resource group and volume group names
    #
    candidate_rg=$1
    candidate_vg=$2
    #
    :	Retrieve info for $candidate_rg
    #
    if ! group_info=$(odmget -q "group = $candidate_rg" HACMPgroup)
    then
	#
	:   ODM failure
	#
	return 1
    fi
    if [[ -z $group_info ]]; then
	#
	:   $candidate_rg does not exist
	#
	if [[ $V_FLAG = true ]]; then
	    #
	    :   The '-v' flag was specified, which means that absence is
	    :   acceptable
	    #
	    return 0
	else
	    #
	    :	Else absence is inexcusable
	    #
	    return 1
	fi
    else
	#
	:   Having gotten information on $candidate_rg from ODM, use eval to
	:   turn that information into a series of local variables for easy
	:   handling
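	:   (e.g. the stanza line 'startup_pref = "OAAN"' becomes the shell
	:   assignment startup_pref="OAAN")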
	#
	eval $(print "$group_info" | sed -n '/ = /s/ = /=/p')
    fi
    #
    :	$candidate_rg node list should include all cluster nodes
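    :	(the leading IFS=, makes the comma-separated node list split into
    :	words that wc -w can count)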
    #
    cluster_node_count=$(IFS=, set -- $_CLUSTER_NODES ; print $* | wc -w)
    group_node_count=$(print $nodes | wc -w)
    if (( $group_node_count != $cluster_node_count )); then
        [[ -n $_DEBUG ]] && (( $_DEBUG >= 8 )) && {
            echo "$PROGNAME: Warning: For multi-node disk heartbeat the containing resource group should include all cluster nodes."
            echo "The node list for resource group [$candidate_rg] does not"
            echo "match the list of all cluster nodes [$_CLUSTER_NODES]."
        }
        # warning only - keep going
    fi
    #
    :	Compare $candidate_rg attributes to the required settings
    #
    if [[ $startup_pref != "OAAN" || $fallover_pref != "BO" || $fallback_pref != "NFB" ]]; then
        [[ -n $_DEBUG ]] && (( $_DEBUG >= 8 )) && {
            echo "$PROGNAME: Error: For multi-node disk heartbeat the containing resource group must have the following policies:"
            echo "startup policy = OAAN (Online on all available nodes)"
            echo "fallover policy = BO (Bring offline)"
            echo "fallback policy = NFB (Never fallback)"
            echo "The candidate resource group [$candidate_rg] has the following settings:"
            echo "startup policy = $startup_pref"
            echo "fallover policy = $fallover_pref"
            echo "fallback policy = $fallback_pref"
        }
        return 1;
    fi
    #
    :	Here check to see if $candidate_rg contains a concurrent mode volume
    :	group which would be needed to hold the logical volume used for MNDHB
    #
    qualifier="name = CONCURRENT_VOLUME_GROUP AND group = $candidate_rg"
    if [[ -n $candidate_vg ]]; then
	#
	:   We were given the volume group name $candidate_vg.  Check to see
	:   if that is present in the resource group $candidate_rg
	#
	qualifier="$qualifier and value = $candidate_vg"
    fi
    #
    :	Search ODM with $qualifier
    #
    if ! resource_info=$(odmget -q "$qualifier" HACMPresource)
    then
	#
	:   ODM failure
	#
	return 1
    fi
    if [[ -z $resource_info ]]; then
	#
	:   The ODM search returned nothing
	#
	if [[ $V_FLAG = false ]]; then
	    #
	    :	Did not find what we were looking for
	    #
	    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 8 )); then
		if [[ -z $candidate_vg ]]; then
		    echo "$PROGNAME: Error: Resource group [$candidate_rg] does not contain any Concurrent Mode Volume Groups"
		    echo "Add the Volume Group to a Resoure Group first"
		else
		    echo "$PROGNAME: Error: Resource group [$candidate_rg] does not contain Concurrent Mode Volume Group [$candidate_vg]"
		fi
	    fi
	    return 1
	else
	    #
	    :   The '-v' flag was specified, which means that absence is
	    :   acceptable
	    #
	    return 0
	fi
    fi
    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 8 )); then
	#
	:   When producing verbose debugging output, pick up the name of the
	:   concurrent mode volume group found in $candidate_vg, if it was not
	:   passed in
	#
	if [[ -z $candidate_vg ]]; then
	    #
	    :	Extract the name of the concurrent mode volume group from the
	    :	odm information obtained above
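	    :	(the sed pulls the quoted string out of the 'value = "..."' line)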
	    #
 	    candidate_vg=$(print "$resource_info" | sed -n '/value =/s/.*"\([^"]*\)".*/\1/p')
	fi
	echo "$PROGNAME: Resource group [$candidate_rg] contains Concurrent Mode Volume Group [$candidate_vg]"
    fi
    #
    :	 Passed
    #
    return 0;
}
###############################################################################
#   FUNCTION: add_lv_to_vg
#
#   Adds a logical volume to a volume group.
#
#   Arguments: logical volume and volume group names
#
#   Returns: 0 if all goes well
#            1 otherwise
#
###############################################################################
function _add_lv_to_vg
{
    typeset PROGNAME=_add_lv_to_vg
    [[ -n $_DEBUG ]] && {
        print "DEBUG: Entering $PROGNAME version 1.5 for $*"
        (( $_DEBUG >= 8 )) && {
            set -x
        }
    }
    #
    :   Pick up the incoming names
    #
    lv_name=$1
    vg_name=$2
echo "add_lv_to_vg: add lv $lv_name to vg $vg_name"
echo "[not yet implemented]"
    return 1;
}
###############################################################################
#   FUNCTION: create_mndhb_network
#
#   Creates a multi node disk heartbeat network and entries in HACMPadapter
#
#   Arguments: network name - name for the new network
#               resource group name - group name (used for adapter odm entry)
#               logical volume name - name of the lv
#               volume group name - name of the vg
#
#   Returns: 0 if all goes well
#            1 otherwise
#
###############################################################################
function _create_mndhb_network
{
    typeset PROGNAME=_create_mndhb_network
    [[ -n $_DEBUG ]] && {
        print "DEBUG: Entering $PROGNAME version 1.5 for $*"
        (( $_DEBUG >= 8 )) && {
            set -x
        }
    }
    #
    :   Pick up the incoming names
    #
    net_name=$1
    rg_name=$2
    lv_name=$3
    vg_name=$4
    #
    :	Check to see if the network $net_name exists
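    :	cllsnw -c prints one colon-separated record per network, so a match
    :	on "^$net_name:" means the network is already defined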
    #
    net_info=$(/usr/es/sbin/cluster/utilities/cllsnw -c | grep ^$net_name:)
    if [[ -n $net_info ]]; then
	nls_msg -l $cspoc_tmp_log ${_MSET} 61 \
        "$PROGNAME: Network [$net_name] already exists\n" $PROGNAME $net_name
	#
        : Network $net_name already exists, delete it and fall into the
	: code below to create it anew
	#
        _delete_mndhb_network $net_name
    fi
    #
    : Add the nim entry, if not already there
    #
    nim_info=$(/usr/es/sbin/cluster/utilities/cllsnim -c | grep ^diskhbmulti:)
    if [[ -z $nim_info ]]; then
        #
        :   Create the diskhbmulti stanza in HACMPnim
        #
        echo "HACMPnim:" >/tmp/nim.add.$$
        echo "\tname = \"diskhbmulti\"" >>/tmp/nim.add.$$
        echo "\tdesc = \"Disk Heartbeating for Multiple Nodes\"" >>/tmp/nim.add.$$
        echo "\taddrtype = 1" >>/tmp/nim.add.$$
        echo "\tpath = \"/usr/sbin/rsct/bin/hats_diskhb_nim\"" >>/tmp/nim.add.$$
        echo "\tpara = \"MANUAL_START TUNABLE_RESTART\"" >>/tmp/nim.add.$$
        echo "\tgrace = 60" >>/tmp/nim.add.$$
        echo "\thbrate = 3000000" >>/tmp/nim.add.$$
        echo "\tcycle = 6" >>/tmp/nim.add.$$
        echo "\tgratarp = 0" >>/tmp/nim.add.$$
        echo "\tentry_type = \"adapter_type\"" >>/tmp/nim.add.$$
        echo "\tnext_generic_type = \"transport\"" >>/tmp/nim.add.$$
        echo "\tnext_generic_name = \"\"" >>/tmp/nim.add.$$
        echo "\tsrc_routing = 0" >>/tmp/nim.add.$$
        if ! odmadd /tmp/nim.add.$$
        then
	    nls_msg -l $cspoc_tmp_log ${_MSET} 62 \
            "$PROGNAME: Error: Failed creating nim entry for diskhbmulti\n" $PROGNAME
            [[ -n $_DEBUG ]] && (( $_DEBUG >= 8 )) && {
                echo "Temporary file is [/tmp/nim.add.$$]"
            }
            return 1;
        fi
        rm -f /tmp/nim.add.$$
    fi
    #
    :	Create the new network, $net_name
    #
    if ! /usr/es/sbin/cluster/utilities/clmodnetwork -a -n $net_name -s "255.0.0.0" -t "serial" -i "diskhbmulti"
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 63 \
	"$PROGNAME: Error: Could not create network [$net_name]\n" $PROGNAME $net_name
        return 1;
    fi
    #
    : Create entries in HACMPadapter for each node.
    : First, generate node list
    #
    if [[ -n $rg_name ]];then
	#
	:   Given resource group $rg_name, find the nodes in it
	#
	group_nodes=$(/usr/es/sbin/cluster/utilities/clgetgrp -c | grep ^$rg_name: | cut -f4 -d":")
    else
	#
	:   If given no resource group name, use the entire set of cluster
	:   nodes
	#
	if [[ -n $_CLUSTER_NODES ]];then
	    group_nodes=$(IFS=, set -- $_CLUSTER_NODES ; print $*)
	else
	    group_nodes=$(odmget -q "object = VERBOSE_LOGGING" HACMPnode | \
	            sed -n 'name =/s/^.* "\(.*\)".*/\1/p')
	fi
    fi
    #
    : Logical volume name must be in the form "/dev/xxx" - if called without
    : the "/dev/" part, add it here
    #
    if [[ $lv_name != /dev/* ]]; then
	lv_name="/dev/"$lv_name
	: prepended incoming name with /dev/ - full name is now $lv_name
    fi
    #
    : Create adapter entry for each node
    #
    rm -f /tmp/adapter.add.$$
    let seq=1
    for node in $group_nodes
    do
	#
        : ip_label field needs to be unique, cluster wide
	#
        export ip_label=$node\_$seq    # candidate label
	#
	: The following print is intended to limit the size of the ip_label
	: entry. 
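	: Only the trailing 32 characters are kept, so the unique _$seq suffix
	: survives a long node name.  A ksh93 child is used because this script
	: may run under ksh88, which lacks the ${name:offset} expansion; that is
	: why ip_label is exported above.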
	#
        ip_label=$(ksh93 -c 'print ${ip_label:$((${#ip_label} - 32))}')
	#
	: loop to find a new/unique name
	#
        while [[ -n $(odmget -q"ip_label = $ip_label" HACMPadapter 2>/dev/null)  ]] # check for existing
        do
            let seq=seq+1
            ip_label=$node\_$seq        # candidate label
            ip_label=$(ksh93 -c 'print ${ip_label:$((${#ip_label} - 32))}')
        done
        #
	:   Create the stanza in HACMPadapter for network $net_name
	#
        echo "HACMPadapter:" >>/tmp/adapter.add.$$
        echo "\ttype = \"diskhbmulti\"" >>/tmp/adapter.add.$$
        echo "\tnetwork = \"$net_name\"" >>/tmp/adapter.add.$$
        echo "\tnodename = \"$node\"" >>/tmp/adapter.add.$$
        echo "\tip_label = \"$ip_label\"" >>/tmp/adapter.add.$$
        echo "\tfunction = \"service\"" >>/tmp/adapter.add.$$
        echo "\tidentifier = \"$lv_name\"" >>/tmp/adapter.add.$$
        echo "\tnetmask = \"\"" >>/tmp/adapter.add.$$
        echo "\thaddr = \"\"" >>/tmp/adapter.add.$$
        echo "\tinterfacename = \"$vg_name\"" >>/tmp/adapter.add.$$
        echo "\thb_alias = \"\"" >>/tmp/adapter.add.$$
        echo "\tnodebound = 0" >>/tmp/adapter.add.$$
        echo "\tmax_aliases = 0" >>/tmp/adapter.add.$$
        let seq=seq+1
    done
    #
    :	Add the adapter entries for network $net_name to the configuration
    #
    if ! odmadd /tmp/adapter.add.$$
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 64 \
        "$PROGNAME: Error: Failed creating adapter entries for network [$net_name]\n" $PROGNAME $net_name
        [[ -n $_DEBUG ]] && (( $_DEBUG >= 8 )) && {
            echo "Temporary file is [/tmp/adapter.add.$$]"
        }
        return 1;
    fi
    rm -f /tmp/adapter.add.$$
    return 0;
}
###############################################################################
#   FUNCTION: delete_mndhb_network
#
#   Deletes a multi node disk heartbeat network and entries in HACMPadapter
#
#   Arguments: network name - name of the network
#
#   Returns: 0 if all goes well
#            1 otherwise
#
###############################################################################
function _delete_mndhb_network
{
    typeset PROGNAME=_delete_mndhb_network
    [[ -n $_DEBUG ]] && {
        print "DEBUG: Entering $PROGNAME version 1.5 for $*"
        (( $_DEBUG >= 8 )) && {
            set -x
        }
    }
    RETCODE=0
    #
    :   Pick up the incoming names
    #
    net_name=$1
    #
    :	Check if the network exists and harvest the vg name
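    :	The vg name was stored as the adapter interfacename when the network
    :	was created, and is expected as the ninth colon-separated field of
    :	cllsif -c output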
    #
    vg=$(/usr/es/sbin/cluster/utilities/cllsif -c | grep :$net_name: | tail -1 | cut -f9 -d:)
    if [[ -z $vg ]]; then
        #
        :   If the network does not exist we are already done, but the caller
        :   is confused
        #
	nls_msg -l $cspoc_tmp_log ${_MSET} 65 \
        "$PROGNAME: Error: Network [$net_name] does not exist \n" $PROGNAME $net_name
	RETCODE=1
    else
	#
	:   Use clmodnetwork to remove the network itself - this marks the
	:   configuration as changed, to force a reconfig, and removes all
	:   the network's adapters.
	#
        /usr/es/sbin/cluster/utilities/clmodnetwork -d -n $net_name 
	RETCODE=$?
        if (( $RETCODE != 0 ))
	then
	    nls_msg -l $cspoc_tmp_log ${_MSET} 66 \
            "$PROGNAME: Error: Failed removing network [$net_name], exit code from clmodnetwork was $RETCODE\n" $PROGNAME $net_name $RETCODE
    	fi
	#
	: Remove any monitor entries
 	#
        monitor=$(odmget -q "name = RESOURCE_TO_MONITOR and type = CONCURRENT_VOLUME_GROUP and value = $vg" HACMPmonitor | \
            sed -n '/monitor = /s/^.* "\(.*\)".*/\1/p')
        if [[ -n $monitor ]]
        then
            #
            :       Monitor $monitor is configured for this volume group.  
            :       Delete any entries associated with it.
            #
	    odmdelete -o HACMPmonitor -q"monitor = $monitor"
	    RETCODE=$?
            if (( $RETCODE != 0 ))
	    then
	        nls_msg -l $cspoc_tmp_log ${_MSET} 67 \
                "$PROGNAME: Error: Failed removing failure action for [$net_name], exit code from odmdelete was $RETCODE\n" $PROGNAME $net_name $RETCODE
    	    fi
	fi
    fi
    return $RETCODE
}
#
:   Pick up passed parameters - a PVID must be given
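:   Arguments arrive encoded by clencodearg and are decoded below with
:   cldecodearg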
#
print $_NON_FLG_ARGS | read EPVID LV_NAME
if [[ -z $EPVID ]]
then
    print $(eval $_USAGE)
    exit 2
fi
#
:   Decode the given PVID
#
DPVID=$(print $EPVID | cldecodearg)
_IMPORT_PVID=$DPVID
#
:   Pick up the logical volume name, if one was given
#
DLV_NAME=${LV_NAME:+$(print $LV_NAME | cldecodearg)}
#
:   Pick up passed options
#
while getopts ":L:e:y:V:r:s:f?" gotten $_CMD_ARGS
do
    case $gotten in
	L )
		#
		:   Logical volume label -  both encoded and decoded forms
		#
		LVLabel=$OPTARG
		DLVLabel=$(print $LVLabel | cldecodearg)
	    ;;
	e )
		#
		:   Network name - both encoded and decoded forms
		#
		NetName=$OPTARG
		DNetName=$(print $NetName | cldecodearg)
	    ;;
	y )
		#
		:   Volume group name - both encoded and decoded forms
		#
		VG=$OPTARG
		DVG=$(print $VG | cldecodearg)
	    ;;
	V ) 
		#
		:   Major number - both encoded and decoded forms
		#
		MajorNumber=$OPTARG
		DMajorNumber=$(print $MajorNumber | cldecodearg)
	    ;;
	r )
		#
		:   New resource group name - both encoded and decoded forms
		#
		ResourceGroup=$OPTARG
		DResourceGroup=$(print $ResourceGroup | cldecodearg)
	    ;;
	s ) 
		#
		:   Partition size - both encoded and decoded forms
		#
		PartitionSize=$OPTARG
		DPartitionSize=$(print $PartitionSize | cldecodearg)
	    ;;
        f )
                #
                :   Force option specified
                #
                ForceMKVG=TRUE
            ;;
	* )
		#
		:   Unexpected input
		#
		print "Unexpected input: '-${gotten} $(print $OPTARG | cldecodearg)"
		exit 1
		;;
    esac
done
#
:   Validation of parameters - the given PVID $DPVID must be a member of the
:   given volume group, $DVG, if any
:
:   First, find out what PVIDs are known across the cluster, removing any
:   hdisks associated with vpath devices.  Pick up the free major numbers
:   while we are at it.
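:   The reversed sort, unique on the PVID field, keeps a vpath line in
:   preference to an hdisk line carrying the same PVID, since 'v' sorts
:   after 'h' and so comes first in reversed order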
#
CHK_CMD="LC_ALL=C lspv -L | tr -s ' ' | grep -vi 'none None' | sort -r -u -k2,2 && echo FREEMAJORS:\$(/usr/sbin/lvlstmajor)"
E_CHK_CMD=$(print $CHK_CMD | clencodearg -e)
cel_f18
#
:   Check to ensure that the given PVID $DPVID shows up as owned by either the
:   given volume group, $DVG, or by no volume group, anywhere in the cluster
#
pv_nodes=""
vg_nodes=""
REF_NODE=""
PVID_IN_VG=false
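#
:   Physical volume lines in $try_out look like
:   'nodename: hdisk3 <pvid> <vg> <state>' - the node name prefix is added
:   by the C-SPOC command distribution - and each node also contributes a
:   FREEMAJORS: line, saved for choosing a major number later
#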
while read out_node out_hdisk out_pvid out_vg out_rest 
do
	if [[ $out_vg = $DVG ]]
	then
	    if [[ $vg_nodes != @(?(* )${out_node%:}?( *)) ]]
	    then
		#
		:   Add $out_node to the list of nodes on which the volume group $DVG is known
		#
		vg_nodes=${vg_nodes:+${vg_nodes}" "}${out_node%:}
	    fi
	fi
	if [[ $out_pvid = $DPVID ]]
	then
	    #
	    :   Add $out_node to the list of nodes on which the PVID $DPVID is known
	    #
	    pv_nodes=${pv_nodes:+${pv_nodes}" "}${out_node%:}
	    if [[ $out_vg != None && $out_vg != $DVG ]]
	    then
		#
		:	The given PVID, $DPVID, shows up as owned by volume group
		:	$out_vg - not the same as $DVG - on node $out_node
		#
		nls_msg -l $cspoc_tmp_log ${_MSET} 42  \
		"${_CMD}: physical volume $DPVID is already in use in volume group $out_vg\n" ${_CMD} $DPVID $out_vg
		exit 1
	    elif [[ $out_vg = $DVG ]]
	    then
		#
		:   The given PVID, $DPVID, is already part of $DVG, so note
		:   that fact
		#
		PVID_IN_VG=true
	    fi
	fi
done < $try_out
#
:   Save the PVID and free major number information, since we will need it later
#
cp $try_out /tmp/cl_lspv.out.$$
#
: Determine the reference node
#
LOCAL_NODE=$(get_local_nodename)
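#
:   The pattern @(?(*,)$LOCAL_NODE?(,*)) matches $LOCAL_NODE as a complete
:   element of the comma-separated $_TARGET_NODES list - prefer the local
:   node as the reference node when it is one of the targets
#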
if [[ $_TARGET_NODES = @(?(*,)$LOCAL_NODE?(,*)) ]]
then
    _REFNODE=$LOCAL_NODE
else
    _REFNODE=${_TARGET_NODES%%,*}
fi
if [[ -z $_REFNODE ]]
then
    nls_msg -l $cspoc_tmp_log $_MSET 20 \
    "${_CMD}: The reference node was not detected\n" ${_CMD}
    exit 1
fi
#
:   Check to ensure consistency of PVID use across the cluster: it must be either an hdisk on
:   all cluster nodes, or a vpath on all cluster nodes, but not a mix.
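:   (grep -p matches whole paragraphs - the lspv listing has no blank lines,
:   so it is a single paragraph, and the pipeline succeeds only when both an
:   hdisk and a vpath entry carry this PVID)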
#
if grep $DPVID /tmp/cl_lspv.out.$$ | grep -p hdisk | grep -p vpath
then
    nls_msg -l $cspoc_tmp_log $_MSET 25 "${_CMD}: PVID $DPVID is used inconsistently\n" ${_CMD} $DPVID
    grep $DPVID /tmp/cl_lspv.out.$$  # show them what we mean by 'inconsistently'
    exit 1
fi
#
: The hdiskname will be needed, so get it now.
#
hdiskname=""
grep ${_REFNODE}.*vpath.*${DPVID} /tmp/cl_lspv.out.$$ | read out_node hdiskname rest
: hdisk=$hdiskname
if [[ $hdiskname = vpath* ]]
then
    #
    : The PVID, $DPVID, is for a vpath device, use $hdiskname and cl_mkvg4vp
    #
    MKVG_CMD=/usr/es/sbin/cluster/sbin/cl_mkvg4vp
else
    grep ${_REFNODE}.*${DPVID} /tmp/cl_lspv.out.$$ | read out_node hdiskname rest
    #
    : The PVID, $DPVID, is not for a vpath device, use $hdiskname and cl_mkvg
    #
    MKVG_CMD=/usr/es/sbin/cluster/sbin/cl_mkvg
fi
#
:   Check to ensure that the given PVID, $DPVID, is known on all the nodes that
:   are supposed to use this MNDHB network
#
for node in $(IFS=, set -- $_TARGET_NODES ; print $*)
do
    #
    :	Is $node in the list $pv_nodes
    #
    if [[ $pv_nodes != @(?(* )$node?( *)) ]]
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 43 \
	"${_CMD}: physical volume $DPVID is not known on node $node\n" ${_CMD} $DPVID $node
	exit 1
    fi
done
if [[ -n $vg_nodes ]]
then
    #
    :   If the given volume group $DVG already exists, check to ensure that it is known
    :   on all the nodes that are supposed to use this MNDHB network
    # 
    for node in $(IFS=, set -- $_TARGET_NODES ; print $*)
    do
	#
	:	Is $node in the list $vg_nodes
	#
	if [[ $vg_nodes != @(?(* )$node?( *)) ]]
	then
	    nls_msg -l $cspoc_tmp_log ${_MSET} 44 \
	    "${_CMD}: volume group $DVG is not known on node $node\n" ${_CMD} $DVG $node
	    exit 1
	fi
    done
fi
#
:   Validation of parameters - if a logical volume name was given, make sure
:   that logical volume does not already exist somewhere
#
if [[ -n $LV_NAME ]]
then
    #
    :	Check across the cluster and see if any node knows of logical volume
    :	$DLV_NAME
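    :	by querying each node's CuAt ODM class for an lvserial_id attribute
    :	with that name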
    #
    QUAL="'attribute = lvserial_id and name = $DLV_NAME'"
    E_QUAL=$(print $QUAL | clencodearg)
    E_CUAT=$(print "CuAt" | clencodearg)
    TRY_RC=0
    cel_f19
    if (( $TRY_RC != 0 )) && [[ -z $_SPOC_FORCE ]]
    then
	#
	:   If some node was unreachable,  or for some other reason could not
	:   run the above odmget command, stop here unless the force option
	:   was specified
	#
	exit 1
    fi
    if grep -qw $DLV_NAME $try_out 
    then
	#
	:   Logical volume $DLV_NAME already exists
	#
	nls_msg -l ${cspoc_tmp_log} ${_MSET} 46 \
	"${_CMD}: Logical volume $DLV_NAME already exists\n" $_CMD $DLV_NAME
	exit 1
    fi
fi
if [[ -n $_RES_GRP ]]
then
    #
    :	If \$_RES_GRP is set, its value $_RES_GRP is from the '-g' cspoc
    :	operand.  Go check to see if it is correctly defined as a concurrent
    :	resource group.  If the given volume group $DVG is defined as a resource,
    :	check to see if it is defined as a concurrent volume group.
    #
    if ! _validate_rg_for_mndhb -v $_RES_GRP $DVG
    then
	nls_msg -l ${cspoc_tmp_log} ${_MSET} 47 \
	"${_CMD}: Resource group $_RES_GRP is incorrectly defined for MNDHB\n" ${_CMD} $_RES_GRP
	exit 1
    fi
fi
#
:   If the volume group $DVG does not exist, create it with that given PVID, $DPVID
#
export _CSPOC_MODE="concurrent"
export _CSPOC_CALLED_FROM_SMIT="true"
if [[ -z $vg_nodes ]]
then
    #
    :	Check to see if we were given a valid major number to use
    #
    if [[ -z $DMajorNumber ]] || (( $DMajorNumber == 0 ))
    then
	#
	:	Generate an appropriate major number - unique across the cluster - for
	:	the new volume group
	#
	major=$(/usr/es/sbin/cluster/cspoc/cl_getmajor /tmp/cl_lspv.out.$$)
    else
	#
	:   Else use the given major number, $DMajorNumber
	#
	major=$DMajorNumber
    fi
    #
    :	Take all the mkvg defaults, except for: no auto varyon, enhanced
    :	concurrent mode, the specified name $DVG, major number $major,
    :	partition size $DPartitionSize, and the given PVID $DPVID
    #
    VG_NAME_PARM=""
    if [[ -n $DVG ]]
    then
	VG_NAME_PARM="-y $DVG"
    fi
    if [[ -z $_RES_GRP ]]
    then
	CSPOC_OPT="-n $_TARGET_NODES"
    else
	CSPOC_OPT="-g $_RES_GRP"
    fi
    if [[ -n $DPartitionSize ]] 
    then
	S_PARM="-s $DPartitionSize"
    else
	S_PARM=""
    fi
    FORCE_MKVG_FLAG=""
    if [[ -n $ForceMKVG ]]
    then
        FORCE_MKVG_FLAG="-f"
    fi
    # reset debug so the following command output does not have debug info
    SAVE_DEBUG=$_DEBUG
    unset _DEBUG
    if ! MKVG_OUT=$($MKVG_CMD -cspoc "$CSPOC_OPT" $FORCE_MKVG_FLAG -n -C $VG_NAME_PARM -V $major $S_PARM $DPVID)
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 48 \
	"${_CMD}: Unable to create volume group $DVG containing disk $DPVID\n" ${_CMD} $DVG $DPVID
	exit 1
    else
	print $MKVG_OUT | read out_node DVG out_rest
	#
	:   Successfully created volume group $DVG containing physical disk
	:   $DPVID across nodes $_TARGET_NODES
	#
	VG=$(print $DVG | clencodearg)
	nls_msg -l $cspoc_tmp_log ${_MSET} 49 \
	"${_CMD}: Created volume group $DVG on physical disk $DPVID\n" ${_CMD} $DVG $DPVID
	vg_nodes=$(IFS=, set -- $_TARGET_NODES ; print $*)
	PVID_IN_VG=true
    fi
    export _DEBUG=$SAVE_DEBUG
elif [[ $PVID_IN_VG != true ]]
then
    #
    :	Invoke the C-SPOC extendvg function to add disk $hdiskname with PVID
    :	$DPVID to volume group $DVG
    #
    if [[ -z $_RES_GRP ]]
    then
	CSPOC_OPT="-n $_TARGET_NODES"
    else
	CSPOC_OPT="-g $_RES_GRP"
    fi
    if ! /usr/es/sbin/cluster/sbin/cl_extendvg -cspoc "$CSPOC_OPT" -R $_REFNODE $DVG $hdiskname
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 50 \
	"${_CMD}: Unable to add disk $hdiskname to volume group $DVG\n" ${_CMD} $hdiskname $DVG
	exit 1
    fi
fi
#
:   Determine the characteristics of the volume group, so that we know how
:   many physical partitions to allocate to the logical volume
#
_vg_status
_vg_active
LSVG_CMD="LC_ALL=C lsvg $DVG"
E_LSVG_CMD=$(print $LSVG_CMD | clencodearg -e)
cel_f20
#
:   Determine the mklv parameters - the logical volume size is the number of
:   partitions needed to hold 32 megabytes
#
integer PARTITION_SIZE=$(sed -n 's/.* *PP SIZE: *\([^ ]*\) *.*/\1/p' $try_out)
integer PP=$(( (32 + PARTITION_SIZE - 1) / PARTITION_SIZE ))	# round up to hold a full 32MB
[[ $PP -eq 0 ]] && PP=1
LV_LABEL_PARM=""
if [[ -n $LVLabel ]]
then
    #
    :	Pick up the label $DLVLabel
    #
    LV_LABEL_PARM="-L $DLVLabel"
fi
LV_NAME_PARM=""
if [[ -n $DLV_NAME ]]
then
    #
    :	Pick up the logical volume name, $DLV_NAME
    #
    LV_NAME_PARM="-y $DLV_NAME"
fi
#
:   Now, call the C-SPOC function to create the logical volume that will hold
:   the multi node disk heart beat network.  Capture the logical volume name,
:   in case we did not get the name as input. The logical volume will
:   be created on $hdiskname
#
if [[ -z $_RES_GRP ]]
then
    CSPOC_OPT="-n $_TARGET_NODES"
else
    CSPOC_OPT="-g $_RES_GRP"
fi
# reset debug so the following command output does not have debug info
SAVE_DEBUG=$_DEBUG
unset _DEBUG
if ! MKLV_OUT=$(/usr/es/sbin/cluster/sbin/cl_mklv -cspoc "$CSPOC_OPT" -R $_REFNODE $LV_NAME_PARM $LV_LABEL_PARM -u 1 -c 1 -e m -t jfs -v n -w n -r n $DVG $PP $hdiskname)
then
    nls_msg -l $cspoc_tmp_log ${_MSET} 52 \
	"${_CMD}: Unable to create logical volume $DLV_NAME on physical disk $hdiskname ($DPVID) to hold MNDHB network $DNetName\n" \
	${_CMD} $DLV_NAME $hdiskname $DPVID $DNetName
    exit 1
else
    print $MKLV_OUT | read node_out DLV_NAME rest
    #
    :	Successfully created logical volume $DLV_NAME in volume group $DVG on
    :	physical disk $DPVID
    #
    LV_NAME=$(print $DLV_NAME | clencodearg)
    nls_msg -l $cspoc_tmp_log ${_MSET} 53 \
    "${_CMD}: Created logical volume $DLV_NAME on physical disk $hdiskname ($DPVID) to hold MNDHB network $DNetName\n" \
    ${_CMD} $DLV_NAME $hdiskname $DPVID $DNetName
fi
export _DEBUG=$SAVE_DEBUG
#
:   Finished with the volume group $DVG
#
_vg_sync release
#
:   Do we have to create a resource group named \$DResourceGroup
#
if [[ -n $DResourceGroup && -z $(odmget -q "group = $DResourceGroup" HACMPgroup) ]]
then
    #
    :   Create a concurrent resource group named $DResourceGroup containing the nodes
    :   $_TARGET_NODES - first, build the definition in a work file,
    :	/tmp/group.add.$$
    #
    print "HACMPgroup:" > /tmp/group.add.$$
    print "\tgroup = $DResourceGroup" >> /tmp/group.add.$$
    print "\tstype = ignore" >> /tmp/group.add.$$
    print "\tnodes = $(IFS=, set -- $_TARGET_NODES ; print $*)" >> /tmp/group.add.$$
    print "\tstartup_pref = OAAN" >> /tmp/group.add.$$
    print "\tfallover_pref = BO" >> /tmp/group.add.$$
    print "\tfallback_pref = NFB" >> /tmp/group.add.$$
    #
    :	Add the new resource group definition
    #	
    if ! odmadd /tmp/group.add.$$
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 54 \
	"${_CMD}: odmadd failed - could not create resource group $DResourceGroup\n" ${_CMD} $DResourceGroup
	exit 1
    fi
    rm /tmp/group.add.$$
fi
#
:   The value of \$_RES_GRP, if any, was used to determine which nodes this
:   operation takes place on.  The value of \$DResourceGroup, if any, is the name
:   of a resource group to create to hold the MNDHB disk heart beat network
#
if [[ -z $DResourceGroup && -n $_RES_GRP ]]
then
    DResourceGroup=$_RES_GRP
fi
#
:	Does resource group $DResourceGroup contain a concurrent volume group named
:	$DVG?
#
if [[ -z $(odmget -q "group = $DResourceGroup and name = CONCURRENT_VOLUME_GROUP and value = $DVG" HACMPresource) ]]
then
    #
    :   Create the definition of the volume group $DVG as a concurrent volume
    :	group in resource group $DResourceGroup
    #
    print "HACMPresource:" > /tmp/resource.add.$$
    print "\tgroup = $DResourceGroup" >> /tmp/resource.add.$$
    print "\tname = CONCURRENT_VOLUME_GROUP" >> /tmp/resource.add.$$
    print "\tvalue = $DVG" >> /tmp/resource.add.$$
    #
    :   Add concurrent volume group $DVG to resource group $DResourceGroup
    #
    if ! odmadd /tmp/resource.add.$$
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 55 \
	"${_CMD}: odmadd failed - could not add volume group $DVG to resource group $DResourceGroup\n" ${_CMD} $DVG $DResourceGroup
	exit 1
    fi
    rm /tmp/resource.add.$$
fi
#
:   If we were given a network name, create the Multi Node Disk Heart Beat
:   Network named $DNetName
#
if [[ -n $DNetName && -n $DResourceGroup ]]
then
    if ! _create_mndhb_network $DNetName $DResourceGroup $DLV_NAME $DVG
    then
	nls_msg -l $cspoc_tmp_log ${_MSET} 56 \
	"${_CMD}: Create failed for MNDHB network $DNetName on logical volume $DLV_NAME in volume group $DVG and resource group $DResourceGroup\n" \
	${_CMD} $DNetName $DResourceGroup $DLV_NAME $DVG
	exit 1
    fi
fi
#
:   Clean up work file on successful exit
#
rm -f /tmp/cl_lspv.out.$$