#!/bin/ksh

# Shared with the autoloaded C-SPOC support functions: try_out/try_err name
# the per-step stdout/stderr capture files; cspoc_tmp_log is the session log.
export try_out try_err cspoc_tmp_log
# FPATH lets ksh autoload the C-SPOC helpers (log_cmd, cdsh, get_rc, ...).
export FPATH=/usr/es/sbin/cluster/cspoc

# Per-invocation temporary log, keyed on this shell's PID.
cspoc_tmp_log=/var/hacmp/log/cel$$_tmplog
# Record the command line as invoked.
log_cmd $cspoc_tmp_log $0 $*

# On any exit, hand the temporary log and the final status to cexit
# (presumably consolidates the log and exits with that status -- autoloaded
# from FPATH, not visible here).
trap 'cexit $cspoc_tmp_log $?' EXIT
function cel_f1
{
    # Run the pre-encoded lspv command ($E_LSPV_CMD) on the reference
    # node(s) in $_REFNODE via the C-SPOC distributed shell, logging the
    # captured output when this function exits.
    cel_s1=/tmp/cel$$_s1
    try_err=${cel_s1}.err
    try_out=${cel_s1}.out
    # Double quotes: the trap string is fully expanded now, at set time.
    trap "log_output $cspoc_tmp_log ${cel_s1} 	    eval $E_LSPV_CMD" EXIT
    # _REFNODE is a comma separated list; add ',' to IFS to split it.
    IFS=,$IFS
    for node in $_REFNODE; do
	cdsh $cel_s1 $node -q 	    eval $E_LSPV_CMD
	cel_rc=$(get_rc ${cel_s1} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		TRY_RC=$cel_rc
		    		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 24 "${_CMD}: Error executing lspv on node $node.\n" ${_CMD} $node 
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    # NOTE(review): TRY_RC is overwritten (not accumulated as in cel_f2),
    # and the return value is only the last node's rc -- generated code.
    return $cel_rc
}
function cel_f2
{
    # Run the pre-encoded lspv command ($E_LSPV_CMD) on every node in the
    # comma separated list $_NODE, accumulating failures into TRY_RC.
    cel_s2=/tmp/cel$$_s2
    try_err=${cel_s2}.err
    try_out=${cel_s2}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s2} 	eval $E_LSPV_CMD" EXIT
    IFS=,$IFS
    for node in $_NODE; do
	cdsh $cel_s2 $node -q 	eval $E_LSPV_CMD
	cel_rc=$(get_rc ${cel_s2} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	    TRY_RC=$((TRY_RC+cel_rc))
		    	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 24 "${_CMD}: Error executing lspv on node $node.\n" ${_CMD} $node 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    # Returns only the last node's rc; TRY_RC carries the running total.
    return $cel_rc
}
function cel_f3
{
    # Query the active cluster membership from each node named in the
    # comma separated list $_ACTIVE_NODE, via the C-SPOC distributed
    # shell.  Per-node stdout/stderr are captured under ${cel_s3}.out/.err
    # and logged to the C-SPOC log when this function returns.
    typeset cel_cmd=/usr/es/sbin/cluster/utilities/clgetactivenodes
    cel_s3=/tmp/cel$$_s3
    try_out=${cel_s3}.out
    try_err=${cel_s3}.err
    # Double quotes: the whole trap string is expanded here, at set time.
    trap "log_output $cspoc_tmp_log ${cel_s3} 	$cel_cmd -n $E_ACTIVE_NODE" EXIT
    # Temporarily treat ',' as a field separator to walk the node list.
    IFS=,$IFS
    for node in $_ACTIVE_NODE
    do
        cdsh $cel_s3 $node -q $cel_cmd -n $E_ACTIVE_NODE
        cel_rc=$(get_rc $cel_s3 $node)
    done
    IFS=${IFS#,}
    # Status of the last node processed; per-node failures are not fatal.
    return $cel_rc
}
function cel_f4
{
    # Ask each target node in turn which volume group owns the given
    # disk (clgetvg); stop at the first node that answers successfully.
    cel_s4=/tmp/cel$$_s4
    try_err=${cel_s4}.err
    try_out=${cel_s4}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s4} 	    clgetvg $option $parameter" EXIT
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cdsh $cel_s4 $node -q 	    clgetvg $option $parameter
	cel_rc=$(get_rc ${cel_s4} $node)
	case $cel_rc in
	    0)
				#
				:   Stop on the first node that reports the owning volume group.
				:   Note that this is just the first one that knows about the
				:   volume group - it appears in the local ODM.  Actual volume
				:   group state is determined below.
				#
				TRY_RC=0
				# NOTE(review): IFS is stripped twice on this early-return
				# path; the second strip is a no-op unless IFS already
				# began with ',' -- appears redundant, generated code.
				IFS=${IFS#,}
		IFS=${IFS#,}
		return
		;;
	    *)
		if [ $cel_rc != 0 ]; then
		                    # 
		                    :   The C-SPOC communications mechanism does not provide a
		                    :   convenient indication of the difference between being
		                    :   unable to reach a remote node, and a failure of a command
		                    :   run on that remote node.  Attempt to distinguish that here
		                    :   by looking for an error message from that node.
		                    #                                                 
		    		if [[ -f $try_err ]] &&
		    		    ! grep -q "^${node}: " $try_err # ignore any 'not found' msg
		    		then
		    		    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 1 "${_CMD}: Can\'t reach $node, continuing anyway\n" ${_CMD} $node 
		    		fi
		    		TRY_RC=$cel_rc
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    # Non-zero here means no node claimed the volume group.
    return $cel_rc
}
function cel_f5
{
    # Probe all target nodes at once (single cdsh fan-out) with
    # clresactive -v $VG, then inspect each node's rc.  Unreachable or
    # failing nodes are tolerated: a warning is logged and rc forced to 0.
    cel_s5=/tmp/cel$$_s5
    try_err=${cel_s5}.err
    try_out=${cel_s5}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s5} 	clresactive -v $VG" EXIT
    cdsh $cel_s5 $_TARGET_NODES -q 	clresactive -v $VG
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cel_rc=$(get_rc ${cel_s5} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	   nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 1 "${_CMD}: can't reach $node, continuing anyway\n"  ${_CMD} $node  
		    	   cel_rc=0
		    	   # NOTE(review): cel_rc was just zeroed, so this
		    	   # accumulation is a no-op by construction.
		    	   TRY_RC=$((TRY_RC+cel_rc))
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    # Always 0 on this path: failures above are downgraded to warnings.
    return $cel_rc
}
function cel_f6
{
    # Vary on volume group $VG (clvaryonvg) on each node in $CL_NODE.
    # On failure, record the rc in TRY_RC and show any captured output.
    cel_s6=/tmp/cel$$_s6
    try_err=${cel_s6}.err
    try_out=${cel_s6}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s6} 	    clvaryonvg $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s6 $node -q1 	    clvaryonvg $VG
	cel_rc=$(get_rc ${cel_s6} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		                    TRY_RC=$cel_rc
		                    nls_msg -2 -l $cspoc_tmp_log 24 7 "error executing clvaryonvg $DVG on node $node\n" $DVG $node 
		                    if [[ -s $try_out || -s $try_err ]]
		                    then
		                        #
		                        :   If stdout or stderr was captured for this failure, show
		                        :   the information to the user
		                        #
		                        nls_msg -2 -l $cspoc_tmp_log 6 7 "Error detail:"
		                        # NOTE(review): '-q' is not a standard cat
		                        # flag -- presumably AIX/C-SPOC specific;
		                        # verify before porting.
		                        cat -q $try_out $try_err >&2
		                    fi
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f7
{
    # Run cl_pvo -v $VG on each node in $CL_NODE.  Errors are logged
    # (with any captured output) but do not stop the loop.
    cel_s7=/tmp/cel$$_s7
    try_err=${cel_s7}.err
    try_out=${cel_s7}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s7} 	    cl_pvo -v $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s7 $node -q1 	    cl_pvo -v $VG
	cel_rc=$(get_rc ${cel_s7} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		#
		    		:   Log any error, but continue.
		    		#
		                    TRY_RC=$cel_rc
		                    # NOTE(review): message text says clvaryonvg but the
		                    # command run here is cl_pvo; the real text comes from
		                    # catalog set 24 msg 7 -- confirm the message id.
		                    nls_msg -2 -l $cspoc_tmp_log 24 7 "error executing clvaryonvg $DVG on node $node\n" $DVG $node 
		                    if [[ -s $try_out || -s $try_err ]]
		                    then
		                        #
		                        :   If stdout or stderr was captured for this failure, show
		                        :   the information to the user
		                        #
		                        nls_msg -2 -l $cspoc_tmp_log 6 7 "Error detail:"
		                        cat -q $try_out $try_err >&2
		                    fi
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f8
{
    # Unlock the volume group: varyonvg -n -b -u $VG (break reserves /
    # unlock, no ODM update) on each node in $CL_NODE, accumulating
    # failures into TRY_RC.
    cel_s8=/tmp/cel$$_s8
    try_err=${cel_s8}.err
    try_out=${cel_s8}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s8} 		    varyonvg -n -b -u $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s8 $node 		    varyonvg -n -b -u $VG
	cel_rc=$(get_rc ${cel_s8} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log 49 26 "Error unlocking volume group %s\n" "$DVG"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f9
{
    # Vary off volume group $VG on each node in the comma separated list
    # $CL_NODE, accumulating failures into TRY_RC and reporting each one.
    # Returns the rc of the last node processed.
    cel_s9=/tmp/cel$$_s9
    try_err=${cel_s9}.err
    try_out=${cel_s9}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s9}                     varyoffvg $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s9 $node                     varyoffvg $VG
	cel_rc=$(get_rc ${cel_s9} $node)
	if [ $cel_rc != 0 ]; then
	    TRY_RC=$((TRY_RC+cel_rc))
	    # Fix: supply the "$DVG" operand that the %s conversion expects
	    # (it was omitted here, unlike the parallel call in cel_f8),
	    # so the message no longer prints with an empty VG name.
	    # NOTE(review): catalog set 49 msg 26 says "unlocking" although
	    # the failing command here is varyoffvg -- confirm message id.
	    nls_msg -2 -l $cspoc_tmp_log 49 26 "Error unlocking volume group %s\n" "$DVG"
	fi
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f10
{
    # Run the pre-encoded update command ($e_update_cmd, a clupdatevg
    # invocation per the error text) on each node in $NODE_LIST,
    # accumulating failures into TRY_RC.
    cel_s10=/tmp/cel$$_s10
    try_err=${cel_s10}.err
    try_out=${cel_s10}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s10} 		eval $e_update_cmd" EXIT
    IFS=,$IFS
    for node in $NODE_LIST; do
	cdsh $cel_s10 $node -q 		eval $e_update_cmd
	cel_rc=$(get_rc ${cel_s10} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		    TRY_RC=$((TRY_RC+cel_rc))
		    		    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 16 "${_CMD}: Error executing clupdatevg $DVG $_IMPORT_PVID on node $node\n" ${_CMD} $DVG $_IMPORT_PVID $node 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f11
{
    # Vary off volume group $VG on each node in $CL_NODE, accumulating
    # failures into TRY_RC and reporting each one.
    cel_s11=/tmp/cel$$_s11
    try_err=${cel_s11}.err
    try_out=${cel_s11}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s11} 		    varyoffvg $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s11 $node 		    varyoffvg $VG
	cel_rc=$(get_rc ${cel_s11} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 7 "${_CMD}: Error executing varyoffvg $DVG on node $node\n" ${_CMD} $DVG $node 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f12
{
    # Lower the volume group fence height to read-only on each node in
    # $CL_NODE.  Failures are logged but deliberately not fatal
    # (TRY_RC is left untouched).
    cel_s12=/tmp/cel$$_s12
    try_err=${cel_s12}.err
    try_out=${cel_s12}.out
    # Expanded at trap-set time ('ro' is C-SPOC-encoded once, here).
    trap "log_output $cspoc_tmp_log ${cel_s12} 			cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s12 $node -q1 			cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s12} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			    #
		    			    :   Log any error, but continue.
		    			    #
		    			    nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f13
{
    # Run varyonvg -c -P $VG (concurrent, passive mode) on each node in
    # $CL_NODE, accumulating failures into TRY_RC.
    cel_s13=/tmp/cel$$_s13
    try_err=${cel_s13}.err
    try_out=${cel_s13}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s13} 		    varyonvg -c -P $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s13 $node 		    varyonvg -c -P $VG
	cel_rc=$(get_rc ${cel_s13} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 56 "${_CMD}: Error executing varyonvg -c -P $DVG on node $node\n" $_CMD $DVG 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f14
{
    # Lower the volume group fence height to read-only on each node in
    # $CL_NODE (same pattern as cel_f12).  Failures are logged but not
    # fatal; TRY_RC is left untouched.
    cel_s14=/tmp/cel$$_s14
    try_err=${cel_s14}.err
    try_out=${cel_s14}.out
    # Expanded at trap-set time ('ro' is C-SPOC-encoded once, here).
    trap "log_output $cspoc_tmp_log ${cel_s14} 		    cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s14 $node -q1 		    cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s14} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			#
		    			:   Log any error, but continue.
		    			#
		    			nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f15
{
    # Lower the volume group fence height to read-only on each node in
    # $CL_NODE.  Errors are logged only: a genuine problem will surface
    # in the subsequent varyonvg.
    cel_s15=/tmp/cel$$_s15
    try_err=${cel_s15}.err
    try_out=${cel_s15}.out
    # Expanded at trap-set time ('ro' is C-SPOC-encoded once, here).
    trap "log_output $cspoc_tmp_log ${cel_s15} 		cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s15 $node -q1 		cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s15} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		    #
		    		    :   Log any error, but continue.  If this is a real problem, the varyonvg will fail
		    		    #
		    		    nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f16
{
    # Re-lock the volume group: varyonvg -n $VG (no ODM sync) on each
    # node in $CL_NODE, accumulating failures into TRY_RC.
    cel_s16=/tmp/cel$$_s16
    try_err=${cel_s16}.err
    try_out=${cel_s16}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s16} 		    varyonvg -n $VG" EXIT
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s16 $node 		    varyonvg -n $VG
	cel_rc=$(get_rc ${cel_s16} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log 49 29 "Error re-locking volume group %s\n" "$DVG"
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f17
{
    # Export volume group $VG on each node in $NODE_LIST.  Failures are
    # reported; NOTE(review): unlike sibling functions, TRY_RC is not
    # updated here -- only the last node's rc is returned.
    cel_s17=/tmp/cel$$_s17
    try_err=${cel_s17}.err
    try_out=${cel_s17}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s17} 	    exportvg $VG" EXIT
    IFS=,$IFS
    for node in $NODE_LIST; do
	cdsh $cel_s17 $node -q 	    exportvg $VG
	cel_rc=$(get_rc ${cel_s17} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		nls_msg -2 -l $cspoc_tmp_log 37 3 "${_CMD}: Could not export volume group $DVG\n" ${_CMD} ${DVG} 
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f18
{
    # Collect volume group names: run lsvg on all target nodes at once
    # (single cdsh fan-out), then check each node's rc, accumulating
    # failures into TRY_RC.
    cel_s18=/tmp/cel$$_s18
    try_err=${cel_s18}.err
    try_out=${cel_s18}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s18}         lsvg" EXIT
    cdsh $cel_s18 $_TARGET_NODES -q         lsvg
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cel_rc=$(get_rc ${cel_s18} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		                TRY_RC=$(( TRY_RC + cel_rc ))
		                nls_msg -l ${cspoc_tmp_log} ${_MSET} 30 \
		    		"${_CMD}: Unable to obtain volume group names from cluster node $node\n" ${_CMD} $node
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f19
{
    # Locate volume group $VGNAME cluster-wide: run clresactive on all
    # cluster nodes at once, then check each node's rc.  With the C-SPOC
    # force flag (-f, _SPOC_FORCE set) an unreachable node is tolerated
    # and its rc downgraded to 0; otherwise it is an error.
    cel_s19=/tmp/cel$$_s19
    try_err=${cel_s19}.err
    try_out=${cel_s19}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s19}     clresactive -v $EVGNAME" EXIT
    cdsh $cel_s19 $_CLUSTER_NODES -q     clresactive -v $EVGNAME
    IFS=,$IFS
    for node in $_CLUSTER_NODES; do
	cel_rc=$(get_rc ${cel_s19} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	# only if -f given do we tolerate a non-reachable node
		    	if [[ -z "$_SPOC_FORCE" ]] ; then
		    	    nls_msg -l $cspoc_tmp_log ${_LVM_MSET} 4 "${_CMD}: Error attempting to locate volume group $VGNAME on $node\n" ${_CMD} $VGNAME $node >& 2
		    	else
		    	    nls_msg -l $cspoc_tmp_log ${_LVM_MSET} 1 "${_CMD}: Can\'t reach $node, continuing anyway\n" ${_CMD} $node >& 2
		    	    cel_rc=0
		    	fi
		    	TRY_RC=$((TRY_RC+cel_rc))
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f20
{
    # Create the volume group: run mkvg4vp $CMD_ARGS on the first node.
    # On failure, record the rc in RETCODE for the caller.
    cel_s20=/tmp/cel$$_s20
    try_err=${cel_s20}.err
    try_out=${cel_s20}.out
    # Expanded at trap-set time; logs captured output on function exit.
    trap "log_output $cspoc_tmp_log ${cel_s20}     mkvg4vp $CMD_ARGS" EXIT
    IFS=,$IFS
    # NOTE(review): the loop variable shadows the list it iterates; the
    # list is expanded before iteration starts, so this still visits each
    # comma separated entry -- generated-code idiom.
    for FIRST_NODE in $FIRST_NODE; do
	cdsh $cel_s20 $FIRST_NODE     mkvg4vp $CMD_ARGS
	cel_rc=$(get_rc ${cel_s20} $FIRST_NODE)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	nls_msg -l $cspoc_tmp_log ${_MSET} 34 "${_CMD}: An error occurred executing mkvg4vp $VGNAME on node ${FIRST_NODE} \n" ${_CMD} ${VGNAME} ${FIRST_NODE} >& 2
		    	RETCODE=$cel_rc
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f21
{
    # Initialize the volume group fence to read/write on the first node
    # for the given disk list.  On failure, record the rc in TRY_RC.
    cel_s21=/tmp/cel$$_s21
    try_err=${cel_s21}.err
    try_out=${cel_s21}.out
    # Expanded at trap-set time ('rw' is C-SPOC-encoded once, here).
    trap "log_output $cspoc_tmp_log ${cel_s21}     cl_vg_fence_init -c $EVGNAME $(print 'rw' | clencodearg) $E_NDISK_LIST" EXIT
    IFS=,$IFS
    for FIRST_NODE in $FIRST_NODE; do
	cdsh $cel_s21 $FIRST_NODE     cl_vg_fence_init -c $EVGNAME $(print 'rw' | clencodearg) $E_NDISK_LIST
	cel_rc=$(get_rc ${cel_s21} $FIRST_NODE)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		            TRY_RC=$cel_rc
		    	nls_msg -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $VGNAME fence height could not be set to read/write" $PROGNAME $VGNAME $(dspmsg -s 128 cspoc.cat 2 Read/Write)
		fi
		;;
	esac
    done
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f22
{
    # Vary off volume group $EVGNAME on the first node, via the C-SPOC
    # distributed shell.  Per-node stdout/stderr are captured under
    # ${cel_s22}.out/.err and logged when this function returns.
    cel_s22=/tmp/cel$$_s22
    try_out=${cel_s22}.out
    try_err=${cel_s22}.err
    # Double quotes: the whole trap string is expanded here, at set time.
    trap "log_output $cspoc_tmp_log ${cel_s22} 	    varyoffvg $EVGNAME" EXIT
    # Treat ',' as a field separator while walking the node name(s).
    IFS=,$IFS
    for FIRST_NODE in $FIRST_NODE
    do
        cdsh $cel_s22 $FIRST_NODE varyoffvg $EVGNAME
        cel_rc=$(get_rc $cel_s22 $FIRST_NODE)
    done
    IFS=${IFS#,}
    # Status of the last node processed; failures are not acted on here.
    return $cel_rc
}
#!/bin/ksh
#  ALTRAN_PROLOG_BEGIN_TAG                                                    
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2020,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                      
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/cspoc/plans/cl_mkvg4vp.cel 1.30.1.5 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 2002,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/plans/cl_mkvg4vp.cel, 726, 2147A_aha726, Feb 05 2021 09:50 PM
# \$Id\$
###############################################################################
#   COMPONENT_NAME: CSPOC
#
# Name:
#       cl_mkvg4vp.cel
#
# Description
#   Creates a shared volume group using vpath devices on a set of specified
#   nodes by called mkvg4vp on one node and then importing the volume group
#   on the other nodes.
#
#   Usage: cl_mkvg4vp -cspoc "[-f] [-g ResourceGroup | -n NodeList]" [-d MaxPVs] [-B] [-G] [-c] [-l true | false] [-x] [-i] [-s PPsize] [-n] [-m MaxPVsize | -t factor] [-r ResourceGroup] [-E] [-V MajorNumber] -f -y VGname PhysicalVolumes
#
# Arguments:
#       -l true | false         Enable/Disable VG for Cross-Site LVM Mirroring
#
#   The cl_mkvg4vp command arguments include all options and arguments
#   that are valid for the AIX mkvg4vp command as well as C-SPOC specific
#   arguments.   The C-SPOC specific arguments are as follows:
#     	-d 1..9			Debug level
# 	-f			C-SPOC force flag
#	-g ResourceGroup	Resource group to use to determine
#						nodes on which to execute cl_mkvg4vp
#	-n NodeList		List of nodes to execute cl_mkvg4vp
#
# Return Values:
#       0       success
#       1       failure
#
###############################################################################
###############################################################################
#
#  FUNCTION: setvg_mode
#
#  Description: Disable/Enable Cross-Site LVM Mirroring for VG.
#
#  Arguments:   $XSiteMirror is set from the value of the -l operand
#               true => enable cross site mirroring for this volume group
#               false => disable cross site mirroring for this volume group
#               NULL => don't set up or remove cross site mirroring
#
#  Returns:     return code from odmadd or odmdelete
#
#  Environment: XSiteMirror
#
###############################################################################
setvg_mode()
{
   #
   # Disable/Enable Cross-Site LVM Mirroring for volume group $VGNAME,
   # driven by $XSiteMirror (set from the -l operand):
   #    "false"  -> remove the VG's HACMPsiteinfo ODM entry
   #    "true"   -> add an HACMPsiteinfo entry if not already present
   #    anything else (or unset) -> leave the configuration alone
   #
   # Returns: status of odmdelete/odmadd, or 0 when nothing was done.
   #
   if [[ $XSiteMirror == "false" ]]
   then
      odmdelete -q"name=VG AND value=${VGNAME}" -o HACMPsiteinfo >/dev/null
   elif [[ $XSiteMirror == "true" ]]
   then
      #
      # If VG is not present in HACMPsiteinfo then do odmadd.
      #
      if [[ -z $(odmget -q"name=VG AND value=${VGNAME}" HACMPsiteinfo) ]]
      then
         class="HACMPsiteinfo:"
         type="name=VG"
         mvg="value=${VGNAME}"
         # Quote the operands so each stanza line reaches odmadd as one
         # word (previously unquoted -- fragile under word-splitting).
         printf "%s\n%s\n%s\n" "$class" "$type" "$mvg" | odmadd
      fi
   fi
}
###############################################################################
# Start of main script
###############################################################################
# Include the PATH and PROGNAME initialization stuff
# @(#)69        1.8  src/43haes/usr/sbin/cluster/cspoc/plans/cl_path.cel, hacmp.cspoc, 61haes_r720, 1539B_hacmp720 9/10/15 13:28:25
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/cspoc/plans/cl_path.cel 1.8 
#  
# Licensed Materials - Property of IBM 
#  
# Restricted Materials of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1999,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
 ################################################################################
#   COMPONENT_NAME: CSPOC
#
# Name:
#       cl_path.cel
#
# Description:
#       C-SPOC Path Initialization Routine.  This routine is to be included
#       in all C-SPOC Execution Plans (e.g. '%include cl_path.cel').
#       it sets up the PATH environment variable to prevent hardcoding of 
#       path names in the CSPOC code.
#
# Arguments:
#       None.
#
# Return Values:
#	None.
#
# Environment Variables Defined:
#
#   PUBLIC:
#	PROGNAME Represents the name of the program 
#	HA_DIR Represents the directory the HA product is shipped under.
#
################################################################################
# Script/program name and C-SPOC standard PATH.
PROGNAME=${0##*/}
PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
# set the HA_DIR env variable to the HA directory
HA_DIR="es"
# Set up useful prompt for when 'set -x' is turned on through _DEBUG
if [[ -n $_DEBUG ]] && (( $_DEBUG == 9 ))
then
    PS4='${PROGNAME:-$_CMD}[$LINENO]: '
    set -x
fi
[[ -n $_DEBUG ]] &&
    print "DEBUG: $PROGNAME version $Source: 61haes_r711 43haes/usr/sbin/cluster/cspoc/plans/cl_mkvg4vp.cel 1/CHECKEDOUT\$"
# Initialize variables
_CMD_NAME=${0##*/}
# Option strings consumed by the C-SPOC _getopts parser (see cl_init.cel
# conventions: '?' optional flag, ':' optional flag w/arg, '[]' exclusive).
_CSPOC_OPT_STR="d:f?[g:n:]"
_OPT_STR="+1d:f?s:l:n?m:t:V:y:C?r:E?P:v:[I?B?S?]"
_USAGE="$(dspmsg -s 115 cspoc.cat 1 'Usage: cl_mkvg4vp -cspoc \"[-f] [-g ResourceGroup | -n NodeList]\" [-d MaxPVs] [-B] [-G] [-c | -C] [-l true | false] [-x] [-i] [-s PPsize] [-n] [-m MaxPVsize | -t factor] [-r ResourceGroup] [-E] [-V MajorNumber] -f -y VGname PhysicalVolumes')"
# Message-catalog set numbers used by nls_msg throughout this plan.
_MSET=115
_LVM_MSET=5
CL_DATFILE="/tmp/cllsvpathids.out"
NOVARYON="FALSE"
#
: Here MAX_USER_RESOURCES value is taken from MAXUSERRESOURCES
: value in cluster.h file. MAX_USER_RESOURCES value indicates
: the maximum number of individual resources that can be
: configured in a resource group. Any change in the value of
: MAXUSERRESOURCES in cluster.h should be replicated here.
#
MAX_USER_RESOURCES=512
# This script requires HA 5.3.0.0 or higher
_VER="5300"
_VERSION="5.3.0.0"
# Include CELPP init code and verification routines.
#  ALTRAN_PROLOG_BEGIN_TAG                                                    
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                      
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog.
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/cspoc/plans/cl_init.cel 1.16.7.9 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1996,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/plans/cl_init.cel, 726, 2147A_aha726, Feb 05 2021 09:50 PM
################################################################################
#
# COMPONENT_NAME: CSPOC
#
# Name:
#       cl_init.cel
#
# Description:
#       C-SPOC Initialization Routine.  This routine is to be included
#       in all C-SPOC Execution Plans (e.g. '%include cl_init.cel').
#       It defines the ksh functions required to implement C-SPOC commands.
#
# Arguments:
#       None.
#
# Return Values:
#       None.
#
# Environment Variables Defined:
#
#   PUBLIC:
#       _OPT_STR            Specifies the list of valid command flags.
#                           Must be specified in the execution plan.
#
#       _CSPOC_OPT_STR      Specifies the list of valid CSPOC flags.
#                           Must be specified in the execution plan.
#
#       cspoc_tmp_log       Full path of the cspoc log file
#                           (/var/hacmp/log/cspoc.log).
#
#       _CLUSTER_NODES      A comma separated list of all nodes in the cluster.
#
#       _NODE_LIST          A comma separated list of nodes from the command
#                           line (i.e. Those specified by -n or implied by -g).
#
#       _TARGET_NODES       A comma separated list that specify the target
#                           nodes for a generated C-SPOC script.
#
#       BADNODES            A space-separated list that specifies the nodes
#                           that are either not defined in the cluster or not
#                           reachable for a generated C-SPOC script.
#
#       _RES_GRP            The resource group specified by -g on the
#                           command line
#
#       _SPOC_FORCE         Set to "Y" when -f specified.  Otherwise not set.
#
#       _DEBUG              Set to <debug_level> when -d specified.
#                           Otherwise not set.
#
#       _CMD_ARGS           The AIX Command Options and arguments from the
#                           C-SPOC command
#
#       _NUM_CMD_ARGS       The number of AIX Command Options and arguments
#                           from the C-SPOC command
#
#       _NON_FLG_ARGS       The non-flag arguments from the C-SPOC command.
#
#       _OF_NA              A list of the optional command flags specified
#                           that do NOT require an option argument.
#
#       _MF_NA              A list of the mandatory command flags specified
#                           that do NOT require an option argument.
#
#       _OF_WA              A list of the optional command flags specified
#                           that require an option argument.
#
#       _MF_WA              A list of the mandatory command flags specified
#                           that require an option argument.
#
#       _VALID_FLGS         A list of valid command flags.
#
#       _CSPOC_OPTS         The CSPOC Options specified on the command line
#                           following the '-cspoc' flag.
#
#       _CSPOC_OF_NA        A list of the optional CSPOC flags specified that
#                           do NOT require an option argument.
#
#       _CSPOC_MF_NA        A list of the mandatory CSPOC flags specified that
#                           do NOT require an option argument.
#
#       _CSPOC_OF_WA        A list of the optional CSPOC flags specified that
#                           require an option argument.
#
#       _CSPOC_MF_WA        A list of the mandatory CSPOC flags specified that
#                           require an option argument.
#
#       _CSPOC_VALID_FLGS   A list of valid CSPOC flags for this CSPOC command.
#
#       CLUSTER_OVERRIDE    Flag to Cluster Aware AIX Commands to signal that
#                           base AIX commands should be allowed to operate.
#                           Applies to 7.1.0 and later.
#
################################################################################
################################################################################
#
# _get_node_list
#
# DESCRIPTION:
#   Generates two lists _CLUSTER_NODES is a list of all nodes in the cluster.
#
################################################################################
function _get_node_list
{
    #
    # Build _CLUSTER_NODES: a comma separated list of every node defined
    # in the cluster, taken from the HACMPnode ODM class.  Prints an
    # error and returns 1 when no nodes are configured; returns 0 otherwise.
    #
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _get_node_list version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_node_list"
            set -x
        fi
    fi

    typeset NODE IP_ADDR
    unset _CLUSTER_NODES

    # Comma-join the node names: with IFS set to ',' inside the
    # subshell, "$*" expands the positional parameters ','-separated.
    _CLUSTER_NODES=$(IFS=, set -- $(clodmget -q "object = COMMUNICATION_PATH" -f name -n HACMPnode) ; print "$*")

    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: CLUSTER NODES [${_CLUSTER_NODES}]"
        print "DEBUG: Leaving _get_node_list"
    fi

    # An empty list means the cluster has not been configured yet.
    if [[ -z ${_CLUSTER_NODES} ]]
    then
        nls_msg -2 21 6 \
            "${_CMD}: The cluster does not appear to be configured - no nodes are defined.  \n  Configure the cluster, nodes and networks then try this operation again.\n" $_CMD
        return 1
    fi

    return 0
}
################################################################################
#
# _get_target_nodes
#
# DESCRIPTION
#   Sets environment variable $_TARGET_NODES to the list of cluster
#   on which the C-SPOC command is to be executed.
#
#	1 - If a node list was specified $_TARGET_NODES is set to
#	    the nodes listed.
#
#	2 - If a resource group was specified $_TARGET_NODES is set
#	    to the list of nodes that are participating in that
#	    resource group.
#
#	3 - If neither a node list or resource group has been specified
#	    then $_TARGET_NODES is set to a list of all nodes in the cluster.
#
################################################################################
function _get_target_nodes
{
    #
    # Set _TARGET_NODES, the nodes a generated C-SPOC script runs on:
    #   1. nodes given via -n and/or the nodes of the -g resource group;
    #   2. otherwise, every node in the cluster;
    #   3. otherwise fail -- no target can be determined.
    #
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _get_target_nodes version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_target_nodes"
            set -x
        fi
    fi

    typeset NODE=""

    if [[ -n $_NODE_LIST || -n $_RG_NODE_LIST ]]
    then
        # Explicit node list and/or resource group nodes, comma-joined.
        _TARGET_NODES=$(IFS=, set -- $_NODE_LIST $_RG_NODE_LIST ; print "$*")
        return 0
    fi

    if [[ -n $_CLUSTER_NODES ]]
    then
        # No list given: fall back to every node in the cluster.
        _TARGET_NODES="$_CLUSTER_NODES"
        return 0
    fi

    # Nothing to go on -- report and fail.
    nls_msg -2 -l ${cspoc_tmp_log} 4 6 \
        "%s: Unable to determine target node list!\n" "$_CMD"
    return 1
}
################################################################################
#
# _get_rgnodes
#
# DESCRIPTION
#   Gets a list of nodes associated with the resource group specified.
#
################################################################################
function _get_rgnodes
{
    #
    # Resolve the node list of resource group $1 into _RG_NODE_LIST.
    # Returns 1 (with a logged message) when no group name is supplied
    # or the group is not found in the HACMPgroup ODM class.
    #
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _get_rgnodes version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_rgnodes"
            set -x
        fi
    fi

    # A resource group name is mandatory.
    if [[ -z $1 ]]
    then
        nls_msg -2 -l ${cspoc_tmp_log} 4 9 \
            "%s: _get_rgnodes: A resource group must be specified.\n" "$_CMD"
        return 1
    fi

    _RG_NODE_LIST=$(clodmget -q "group = $1" -f nodes -n HACMPgroup)
    if [[ -n $_RG_NODE_LIST ]]
    then
        return 0
    fi

    # Unknown resource group.
    nls_msg -2 -l ${cspoc_tmp_log} 4 50 \
        "%s: Resource group %s not found.\n" "$_CMD" "$1"
    return 1
}
#######################################################################
#
# _getopts
#
# DESCRIPTION
#   Parses command line options for C-SPOC commands.
#
#######################################################################
#
# OPTION STRING
#   The _getopts() routine requires the execution plan to define the
#   environment variable $_OPT_STR which is refered to as the option
#   string.  The option string is used to define valid and/or required
#   flags, the required number of non-flag arguments, and what flags
#   may or may not be specified together.
#
#    Operator   Description                                  Example
#    --------   ------------------------------------------   ---------
#	()	Groups mutually required flags               (c!d:)
#	[]	Groups mutually exclusive flags              [f,b,]
#
#	?	Optional flag (default)                      b?
#	!	Mandatory flag                               c!
#
#	:	Optional flag that requires an argument      d:
#	^	Mandatory flag that requires an argument     e^
#
#	.	Optional multi-byte flag
#	,	Mandatory multi-byte flag                    f,
#
#	+N	Indicates that N non-flag arguments are.     +2
#               required. It must be at the beginning of
#               the option string.
#
#   Notes:
#	1 - A flag that can be specified with or without an argument
#           would be specified twice as follows: _OPT_STR="a?a:"
#
#	2 - A flag that requires an argument cannot also be the first
#           letter of a multi-byte flag.  (i.e. -b arg -boot ) as there
#           is no way to differentiate between the two.
#
#  Example:
#    The following option string would correspond to the usage below
#    In the usage '[]' indicates optional flags and '()' indicates
#    grouping.
#
#	_OPT_STR="+2ab?(c!d:)e^[f,b,]g."
#
#    Usage:
#     cmd [-a] [-b] -c [-d arg] -e arg ( -foo | -bar ) [-go] arg1 arg2 [arg3]
#
#
#######################################################################
function _getopts
{
    #######################################################################
    # _getopts
    #
    # DESCRIPTION
    #   Parses the command line for a C-SPOC command.  The first two
    #   arguments are option strings (see the syntax documented in the
    #   comment block preceding this function): $1 describes the valid
    #   C-SPOC flags (given after "-cspoc"), $2 describes the valid AIX
    #   command flags.  The remaining arguments are the command line.
    #
    # GLOBALS SET FOR THE CALLER
    #   _CMD_ARGS      - encoded AIX command flags and arguments
    #   _NON_FLG_ARGS  - encoded non-flag arguments only
    #   _NUM_CMD_ARGS  - count of non-flag arguments seen
    #   _NODE_LIST, _RES_GRP, _CSPOC_QUIET, _SPOC_FORCE, _DEBUG
    #
    # RETURNS
    #   0 on success, 1 on a malformed option string, 2 on a usage
    #   error (some usage errors exit 2 directly).
    #######################################################################
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _getopts 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_opts"
            set -x
        fi
    fi
    typeset CMD=${0##*/}
    # unset the following variables to avoid these variables being
    # influenced implicitly by external environment. Note that we will
    # not unset/touch _DEBUG since it is being checked even before hitting
    # this part of the code. i.e. depending upon the _DEBUG flag we set
    # set -x option initially itself.
    unset _NODE_LIST
    unset _RES_GRP
    unset _CSPOC_QUIET
    # LOCAL VARIABLES
    typeset _OPT_STR _CSPOC_OPT_STR OPT X Y
    typeset _VALID_FLGS _CSPOC_VALID_FLGS
    typeset _OF_NA _MF_NA _OF_WA _MF_WA
    typeset _CSPOC_OF_NA _CSPOC_MF_NA _CSPOC_OF_WA _CSPOC_MF_WA
    typeset _GOPT=no _NOPT=no
    # THE FIRST TWO ARGS MUST BE OPTION STRINGS
    _CSPOC_OPT_STR=$1
    _OPT_STR=$2
    shift 2
    # CHECK CSPOC OPT STRING SPECIFIED IN THE EXECUTION PLAN
    # FOR OPTIONAL OR REQUIRED FLAGS
    [[ $_CSPOC_OPT_STR == *g^* ]] && _GOPT=req
    [[ $_CSPOC_OPT_STR == *g:* ]] && _GOPT=opt
    [[ $_CSPOC_OPT_STR == *n^* ]] && _NOPT=req
    [[ $_CSPOC_OPT_STR == *n:* ]] && _NOPT=opt
    # CHECK IF THE OPTION STRINGS SPECIFY A REQUIRED NUMBER OF NON-FLAG ARGS
    # (only a single-digit count is supported: "+N" is exactly two chars)
    if [[ $_OPT_STR == +* ]]
    then
        X=${_OPT_STR#??}
        Y=${_OPT_STR%"$X"}
        _OPT_STR=$X
        _NUM_ARGS_REQ=${Y#?}
    fi
    # PARSE THE OPTION STRING ($_OPT_STR) INTO FIVE LISTS
    #  ${_OF_NA} is a list of optional flags that DO NOT take an option arg.
    #  ${_MF_NA} is a list of mandatory flags that DO NOT take an option arg.
    #  ${_OF_WA} is a list of mandatory flags that DO take an option argument
    #  ${_MF_WA} is a list of optional flags that DO take an option argument
    #  ${_VALID_FLGS} is a list of all valid flags.
    # Note that both strings start and end with a space (to facilitate grepping)
    # and contain a list of space separated options each of which is preceded
    # by a minus sign.
    # THE FOLLOWING WHILE LOOP SIMPLY ORGANIZES THE VALID FLAGS INTO
    # FOUR LISTS THAT CORRESPOND TO THE FOUR FLAG TYPES LISTED ABOVE
    # AND A FIFTH LIST THAT INCLUDES ALL VALID FLAGS.
    X=${_OPT_STR}
    [[ $X == '-' ]] && X=""
    while [[ -n ${X} ]]
    do
        # GET THE NEXT LETTER OF THE OPTION STRING
        Y=${X#?}
        OPT=${X%"$Y"}
        X=${Y}
        # CHECK FOR AND PROCESS MUTUALLY REQUIRED OR MUTUALLY EXCLUSIVE FLAGS
        case $OPT in
            '(') # STARTS A GROUP OF MUTUALLY REQUIRED FLAGS
                 if [[ -n $MUTREQ ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character \"(\""
                     return 1
                 fi
                 MUTREQ=Y
                 continue
            ;;
            ')') # ENDS A GROUP OF MUTUALLY REQUIRED FLAGS
                 if [[ -z $MUTREQ ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character \")\""
                     return 1
                 fi
                 MUTREQ=""
                 MUTREQ_FLAGS=$MUTREQ_FLAGS" "
                 continue
            ;;
            '[') # STARTS A GROUP OF MUTUALLY EXCLUSIVE FLAGS
                 if [[ -n $MUTEX ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character \"[\""
                     return 1
                 fi
                 MUTEX=Y
                 continue
            ;;
            ']') # ENDS A GROUP OF MUTUALLY EXCLUSIVE FLAGS
                 if [[ -z $MUTEX ]]
                 then
                     print "$_CMD: _getopts: Invalid format for \$_OPT_STR"
                     print "$_CMD: _getopts: Unexpected character \"]\""
                     return 1
                 fi
                 MUTEX=""
                 MUTEX_FLAGS=$MUTEX_FLAGS" "
                 continue
            ;;
        esac
        # KEEP A LIST OF MUTUALLY EXCLUSIVE FLAGS
        if [[ -n $MUTEX && $MUTEX_FLAGS != *${OPT}* ]]; then
            MUTEX_FLAGS=${MUTEX_FLAGS}${OPT}
        fi
        # KEEP A LIST OF MUTUALLY REQUIRED FLAGS
        if [[ -n $MUTREQ && $MUTREQ_FLAGS != *${OPT}* ]]; then
            MUTREQ_FLAGS=${MUTREQ_FLAGS}${OPT}
        fi
        # KEEP A LIST OF ALL VALID FLAGS
        _VALID_FLGS="${_VALID_FLGS} -$OPT"
        # DETERMINE THE FLAG TYPE AS DESCRIBED ABOVE
        # ADD THE FLAG TO THE APPROPRIATE LIST AND
        # STRIP OFF THE FLAG TYPE IDENTIFIER FROM
        # THE OPTION STRING '${_OPT_STR}'.
        case $X in
            '.'*) # OPTIONAL MULTI-BYTE FLAG
                  X=${X#.}
                  _OF_MB="${_OF_MB} -$OPT"
            ;;
            ','*) # MANDATORY MULTI-BYTE FLAG
                  X=${X#,}
                  _MF_MB="${_MF_MB} -$OPT"
            ;;
            ':'*) # OPTIONAL FLAG THAT REQUIRES AN ARGUMENT
                  X=${X#:}
                  _OF_WA="${_OF_WA} -$OPT"
            ;;
            '^'*) # MANDATORY FLAG THAT REQUIRES AN ARGUMENT
                  X=${X#^}
                  _MF_WA="${_MF_WA} -$OPT"
            ;;
            '!'*) # MANDATORY FLAG
                  X=${X#!}
                  _MF_NA="${_MF_NA} -$OPT"
            ;;
            '?'*) # OPTIONAL FLAG
                  X=${X#?}
                  _OF_NA="${_OF_NA} -$OPT"
            ;;
            *)    # OPTIONAL FLAG
                  _OF_NA="${_OF_NA} -$OPT"
            ;;
        esac
    done # End of the option "while" loop
    # TACK A SPACE ONTO THE END OF EACH LIST TO MAKE OPTION GREPPING SIMPLE
    _VALID_FLGS=$_VALID_FLGS" "
    _OF_NA=$_OF_NA" " ; _OF_WA=$_OF_WA" " ; _OF_MB=$_OF_MB" "
    _MF_NA=$_MF_NA" " ; _MF_WA=$_MF_WA" " ; _MF_MB=$_MF_MB" "
    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 3 ))
    then
        print "DEBUG(3): _OF_NA=$_OF_NA"
        print "DEBUG(3): _MF_NA=$_MF_NA"
        print "DEBUG(3): _OF_WA=$_OF_WA"
        print "DEBUG(3): _MF_WA=$_MF_WA"
        print "DEBUG(3): _OF_MB=$_OF_MB"
        print "DEBUG(3): _MF_MB=$_MF_MB"
        print "DEBUG(3): _VALID_FLGS=$_VALID_FLGS"
    fi
    # PARSE THE COMMAND LINE ARGS
    let _NUM_CMD_ARGS=0
    while [[ -n $* ]]
    do
        THIS_FLAG=$1
        THIS_ARG=${THIS_FLAG#??}
        THIS_FLAG=${THIS_FLAG%"$THIS_ARG"}
        if [[ -n $_DEBUG ]]
        then
            print "THIS_FLAG=\"$THIS_FLAG\""
            print "THIS_ARG=\"$THIS_ARG\""
        fi
        if [[ $1 == '-cspoc' ]]
        then
            #
            :   Check for and process any CSPOC flags
            #
            _CSPOC_OPTS=$2
            if [[ -z $_CSPOC_OPTS || $_CSPOC_OPTS == *([[:space:]]) ]]
            then
                SHIFT=1
            else
                SHIFT=2
                while getopts ':fd#n:?g:q' _CSPOC_OPTION $_CSPOC_OPTS 
                do
                    case $_CSPOC_OPTION in
                        f ) :   Force option
                            export _SPOC_FORCE=Y
                        ;;
                        d ) :   Debug level
                            export _DEBUG=$OPTARG
                        ;;
                        n ) :   Target node list
                            export _NODE_LIST=$(print $OPTARG | sed -e"s/['\"]//g")
                        ;;
                        g ) :   Target resource group 
                            export _RES_GRP=$(print $OPTARG | sed -e"s/['\"]//g")
                        ;;
                        q ) :   Suppress output to stdout
                            export _CSPOC_QUIET=YES
                        ;;
                        : ) :   Missing operand - ignored
                        ;;
                        * ) :   Invalid flag specified
                            nls_msg -2 -l ${cspoc_tmp_log} 4 13 \
                                "%s: Invalid C-SPOC flag [%s] specified.\n" \
                                "$_CMD" "$_CSPOC_OPTION"
                            print "$_USAGE"
                            exit 2
                        ;;
                    esac
                done
            fi
            #
            :   Validate required and mutually exclusive CSPOC operands
            #
            if [[ $_GOPT == "no" && -n $_RES_GRP ]]
            then
                #
                :   Is "-g" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 60 \
                    "%s: C-SPOC -g flag is not allowed for this command.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_NOPT == "no" && -n $_NODE_LIST ]]
            then
                #
                :   Is "-n" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 61 \
                    "%s: C-SPOC -n flag is not allowed for this command.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_GOPT == "req" && $_NOPT == "req" ]] && \
                 [[ -z $_RES_GRP && -z $_NODE_LIST ]]
            then
                #
                :   Check for "-g" or "-n" present when one
                :   or the other is required
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 62 \
                    "%s: Either the '-g' or the '-n' C-SPOC flag must be specified.\n" "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ -n $_RES_GRP && -n $_NODE_LIST ]]
            then
                #
                :   Check that both "-g" and "-n" are not specified together
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 63 \
                    "%s: C-SPOC -g and -n flags are mutually exclusive.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_NOPT != "req" && $_GOPT == "req" && -z $_RES_GRP ]]
            then
                #
                :   Is only "-g" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 64 \
                    "%s: C-SPOC -g flag is required.\n" "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_GOPT != "req" && $_NOPT == "req" && -z $_NODE_LIST ]]
            then
                #
                :   Is only "-n" required
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 65 \
                    "%s: C-SPOC -n flag is required.\n" "$_CMD"
                print "$_USAGE"
                return 2
            fi
            shift $SHIFT
        elif [[ "$THIS_FLAG" != -* ]]
        then
            #  AIX COMMAND ARGUMENT THAT IS NOT AN OPTION FLAG
            #  NEED TO ACCOMMODATE OPTIONS THAT MAY OR MAY NOT HAVE AN ARGUMENT.
            #  IF OPT_ARG DOESN'T START WITH A '-' ITS AN ARGUMENT OTHERWISE
            #  CONSIDER IT TO BE THE NEXT OPTION
            let _NUM_CMD_ARGS=$_NUM_CMD_ARGS+$#
            TMP_FLAG=""
            while (( $# > 0 ))
            do
                case "$1" in
                    -*) TMP_FLAG=$(echo $1 | cut -c1-2)
                        _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$TMP_FLAG"
                        TMP_ARG1=$(echo $1 | cut -c3-)
                        if [[ -n $TMP_ARG1 ]] 
                        then
                            TMP_ARG1="$(print -- $TMP_ARG1 |\
                                        clencodearg $_ENCODE_ARGS)"
                            _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$TMP_ARG1"
                            TMP_FLAG=""
                        fi
                    ;;
                    *) TMP_ARG2="$(print -- $1 | clencodearg $_ENCODE_ARGS)"
                       _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}${TMP_ARG2}
                       if [[ -z $TMP_FLAG ]]
                       then
                           _NON_FLG_ARGS=${_NON_FLG_ARGS:+"${_NON_FLG_ARGS} "}"${TMP_ARG2}"
                       fi
                       TMP_FLAG=""
                esac
                shift
            done
            break
        else	# COME INTO HERE WITH $THIS_FLAG and $THIS_ARG SET
            ARG_CHECK=Y
            ARG_NEXT=""
            while [[ -n $ARG_CHECK ]]
            do
                # NOW CHECK IF WE STILL HAVE MORE FLAGS TO PROCESS
                [[ -z $THIS_ARG ]] && ARG_CHECK=""
                if print -- "$_OF_MB $_MF_MB" | grep -- "$THIS_FLAG" > /dev/null
                then
                    # THIS IS A MULTI-BYTE FLAG
                    if [[ -z $THIS_ARG ]]
                    then
                        ( print -- "$_OF_NA $_MF_NA" | grep -- "$THIS_FLAG" > /dev/null ) || \
                        {
                            # THIS FLAG REQUIRES AN ARGUMENT
                            nls_msg -2 -l ${cspoc_tmp_log} 4 19 \
                                "%s: Invalid option [%s].\n" "$_CMD" "$1"
                            print "$_USAGE"
                            exit 2
                        }
                    fi
                    # VALID AIX COMMAND MULTI-BYTE OPTION (WITHOUT AN ARGUMENT)
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG$THIS_ARG"
                    shift
                    ARG_CHECK=""	# Disable further processing of $THIS_ARG as flags
                elif print -- "$_OF_WA $_MF_WA" | grep -q -- "$THIS_FLAG"
                then
                    # THIS IS A FLAG THAT REQUIRES AN ARGUMENT
                    # HANDLE OPTIONAL SPACE BETWEEN FLAG AND ITS ARG
                    if [[ -z $THIS_ARG && -z $ARG_NEXT ]]
                    then
                        THIS_ARG=$2		# THERE WAS A SPACE
                        SHIFT=2
                    else
                        SHIFT=1		# THERE WAS NO SPACE
                    fi
                    # NOW VALIDATE THAT WE HAVE AN ARG AND THAT IT IS VALID
                    if [[ -z $THIS_ARG || $THIS_ARG == -* ]]
                    then
                        # IF THERE IS NO ARG THEN CHECK IF FLAG MAY BE SPECFIED WITHOUT ONE
                        print -- "$_OF_NA $_MF_NA" | grep -q -- "$THIS_FLAG" ||\
                        {
                            # THIS FLAG REQUIRES AN ARGUMENT
                            nls_msg -2 -l ${cspoc_tmp_log} 4 19 \
                            "%s: Option [%s] requires an argument.\n" "$_CMD" "$1"
                            print "$_USAGE"
                            exit 2
                        }
                    fi
                    # VALID AIX COMMAND OPTION WITH AN ARGUMENT
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG $(print -- $THIS_ARG | clencodearg $_ENCODE_ARGS)"
                    shift $SHIFT
                    # Disable further processing of $THIS_ARG as flags
                    ARG_CHECK=""
                elif print -- "$_OF_NA $_MF_NA" | grep -q -- "$THIS_FLAG"
                then
                    # THIS IS A FLAG THAT DOES NOT TAKE AN ARGUMENT
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG"
                    # IF THIS FLAG WAS OBTAINED FROM $THIS_FLAG THEN WE WANT TO
                    # SHIFT. IF IT WAS OBTAINED FROM $THIS_ARG THEN WE DONT
                    [[ -z $ARG_CHECK ]] && shift
                    # THIS FLAG DOES NOT TAKE AN OPTION ARGUMENT SO ASSUME
                    # THAT "$THIS_ARG" SPECIFIES MORE FLAGS TO PROCESS.
                    if [[ -n $THIS_ARG ]]
                    then
                        # GET THE NEXT FLAG, ADJUST $THIS_ARG,
                        # AND KEEP PROCESSING.
                        X=${THIS_ARG#?}
                        THIS_FLAG="-${THIS_ARG%$X}"
                        THIS_ARG=$X
                        ARG_NEXT=Y
                    fi
                else
                    nls_msg -2 -l ${cspoc_tmp_log} 4 26 \
                    "%s: Invalid option [%s].\n" "$_CMD" "$1"
                    print "$_USAGE"
                    exit 2
                fi
            done
        fi
    done
    ##
    # PERFORM CHECKING OF THE AIX COMMAND FLAGS
    ##
    # CHECK FOR REQUIRED NUMBER OF NON-FLAG ARGUMENTS
    if (( ${_NUM_CMD_ARGS:-0} < ${_NUM_ARGS_REQ:-0} ))
    then
        nls_msg -2 -l ${cspoc_tmp_log} 4 27 \
            "%s: Missing command line arguments.\n" "$_CMD"
        print "$_USAGE"
        return 2
    fi
    # THIS IS WHERE WE CHECK FOR MANDATORY FLAGS, MUTUALLY EXCLUSIVE FLAGS,
    # AND MUTUALLY REQUIRED FLAGS
    # CHECK FOR MUTUALLY REQUIRED FLAGS
    # FOR EACH GROUP OF FLAGS SPECIFIED IN $MUTREQ_FLAGS WE WILL COUNT HOW
    # MANY WE NEED AND HOW MANY ARE GIVEN ON CMD LINE.  IF THESE VALUES ARE
    # NOT EQUAL PRINT AN ERROR AND RETURN NON-ZERO
    typeset -i CNT=0 N=0
    for GROUP in $MUTREQ_FLAGS
    do
        # GET A COUNT OF HOW MANY FLAGS IN THIS GROUP
        print -n $GROUP | wc -c | read N
        typeset -i CNT=0
        F=""
        while [[ -n $GROUP ]]
        do
            # GET THE NEXT FLAG IN THE GROUP
            A=${GROUP#?}
            B=${GROUP%"$A"}
            GROUP=$A
            # IF THIS FLAG IS USED INCREMENT THE COUNTER
            if [[ "$(print -- $_CMD_ARGS | grep -- '-'${B})"' ' != ' ' ]]
            then
                (( CNT = CNT + 1 ))
            fi
            F=${F:+"$F, "}"-"$B
        done
        # VERIFY THAT THE COUNTER EQUALS THE TOTAL NUMBER OF FLAGS IN THE GROUP
        if (( $CNT != $N ))
        then
            print "$_CMD: One or more flags [$F] were not specified."
            print "$_CMD: Specifying any one of these flags requires the others."
            return 2
        fi
    done
    # CHECK FOR MUTUALLY EXCLUSIVE FLAGS
    # FOR EACH GROUP OF FLAGS SPECIFIED IN $MUTEX_FLAGS WE WILL COUNT HOW
    # MANY ARE GIVEN ON CMD LINE.  IF MORE THAN ONE IS GIVEN THEN PRINT
    # AN ERROR AND RETURN NON-ZERO
    for GROUP in $MUTEX_FLAGS
    do
        # GET A COUNT OF HOW MANY FLAGS IN THIS GROUP
        typeset -i CNT=0
        F=""
        while [[ -n $GROUP ]]
        do
            # GET THE NEXT FLAG IN THE GROUP
            A=${GROUP#?}
            B=${GROUP%"$A"}
            GROUP=$A
            # IF THIS FLAG IS USED INCREMENT THE COUNTER
            if [[ -n "$(print -- $_CMD_ARGS | grep -- '-'${B})" ]]
            then
                (( CNT = CNT + 1 ))
            fi
            F=${F:+"$F, "}"-"$B
        done
        # VERIFY THAT AT MOST ONE FLAG IN THE GROUP WAS SPECIFIED
        if (( $CNT > 1 ))
        then
            print "$_CMD: The flags [$F] are mutually exclusive."
            print "$_CMD: Only one of these flags may be specified."
            return 2
        fi
    done
    # CHECK FOR ALL MANDATORY FLAGS
    for X in $_MF_NA $_MF_WA
    do
        # CHECK THAT MANDATORY FLAG IS ON COMMAND LINE
        if [[ -z "$(print -- $_CMD_ARGS | grep -- ${X})" ]]
        then
            # THE FLAG WAS NOT SPECIFIED SO WE MUST FIRST CHECK IF ANOTHER
            # FLAG WAS SPECIFIED THAT IS MUTUALLY EXCLUSIVE WITH THIS ONE.
            for GROUP in $MUTEX_FLAGS
            do
                OK=""
                while [[ -n $GROUP ]]
                do
                    Y=${GROUP#?}
                    Z=${GROUP%"$Y"}
                    GROUP=$Y
                    print -- " $_CMD_ARGS " |\
                        grep -- "-${Z} " > /dev/null && OK=Y
                done
                [[ -n $OK ]] && break
            done
            # "$OK" IS NULL IF NO FLAG IN THIS MUTEX GROUP WAS GIVEN
            if [[ -z $OK ]]
            then
                nls_msg -2 -l ${cspoc_tmp_log} 4 29 \
                "%s: Mandatory option [%s] not specified.\n" "$_CMD" "$X"
                print "$_USAGE"
                return 2
            fi
        fi
    done
    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 3 ))
    then
        print -- "DEBUG(3): _CMD_ARGS=$_CMD_ARGS"
    fi
    return 0
} # End of "_getopts()"
################################################################################
#
# DESCRIPTION:
#   Updates the C-SPOC logfile
#
################################################################################
function cexit
{
    ############################################################################
    # cexit
    #
    # DESCRIPTION
    #   EXIT-trap handler: appends the per-invocation temporary log to the
    #   permanent cspoc.log, and on a successful command exit removes the
    #   temporary files created by the generated C-SPOC script.
    #
    # ARGUMENTS
    #   $1 - temporary log file for this command invocation
    #   $2 - return code of the command (0 means clean up temp files)
    #
    # RETURNS
    #   0 on success, 1 on bad usage or if the temporary log is unreadable
    ############################################################################
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering cexit version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME=cexit
            set -x
        fi
    fi
    typeset USAGE="USAGE: cexit <temp_log_file> <return_code>"
    #
    : CHECK USAGE - do not proceed without both arguments, since the code
    : below dereferences them.
    #
    if (( $# != 2 ))
    then
        print "$_CMD: $USAGE"
        return 1
    fi
    typeset TEMP_LOG=$1
    typeset RC=$2
    #
    : Read the HACMPlogs ODM for the pathname of the cspoc.log log file
    : If the ODM is empty or corrupted, use /var/hacmp/log/cspoc.log
    #
    DESTDIR=$(clodmget -q "name = cspoc.log" -f value -n HACMPlogs)
    if [[ -n $DESTDIR ]]
    then
        CSPOC_LOG="$DESTDIR/cspoc.log"
    else
        dspmsg scripts.cat 463 "The cluster log entry for %s could not be found in the HACMPlogs ODM.\n" "cspoc.log"
        dspmsg scripts.cat 464 "Defaulting to log directory %s for log file %s.\n" "/var/hacmp/log" "cspoc.log"
        CSPOC_LOG="/var/hacmp/log/cspoc.log"
    fi
    #
    : CHECK ARGS
    #
    if [[ ! -f ${TEMP_LOG} ]]
    then
        # Message format is "%s: Unable to open file: %s" - the command
        # name comes first, then the file that could not be opened.
        nls_msg -2 -l ${CSPOC_LOG} 4 39 \
            "%s: Unable to open file: %s\n" "$_CMD" "${TEMP_LOG}"
        return 1
    fi
    #
    :  If the log file does not exist, create it.
    #
    if [[ ! -f ${CSPOC_LOG} ]]; then
        touch ${CSPOC_LOG}
    fi
    #
    :  Keep the information in the log file if we have write permission
    #
    if [[ -w $CSPOC_LOG ]]
    then
        cat ${TEMP_LOG} >> $CSPOC_LOG
    fi
    #
    :  On a successful exit, remove the temporary log and the per-process
    :  temporary files, unless a high debug level asks to keep them.
    #
    if (( $RC == 0 )) && { [[ -z $_DEBUG ]] || (( $_DEBUG <= 8 )); }
    then
        rm -f ${TEMP_LOG%_*}*
        rm -f /tmp/cel$$_s*.err
        rm -f /tmp/cel$$_s*.out
        rm -f /tmp/cel$$.cache
    fi
    return 0
} # End of "cexit()"
################################################################################
#
# _cspoc_verify - Performs verification of a number of CSPOC requirements.
#                 Certain requirements, if not met, produce a hard error
#                 and the routine produces an immediate exit of the script.
#                 Other requirements, if not met, produce soft errors that
#                 result in the routine returning a value of '1'.  The
#                 calling script will then exit unless the CSPOC force flag
#                 has been set.
#
################################################################################
function _cspoc_verify
{
    ############################################################################
    # _cspoc_verify
    #
    # DESCRIPTION
    #   Verifies C-SPOC requirements before the command is distributed:
    #     - all target nodes appear in the local cluster definition
    #     - CAA (lscluster -m) considers the target nodes reachable
    #     - clcomd can reach the target nodes (via clhaver)
    #     - the target nodes run at least PowerHA level $_VER
    #   Unusable nodes are removed from $_TARGET_NODES.  If no usable
    #   target node remains, the function exits 1 immediately; otherwise
    #   soft failures return 1 so the caller can honor the force flag.
    #
    # GLOBALS
    #   Reads:  _CLUSTER_NODES, _SPOC_FORCE, _CSPOC_CALLED_FROM_SMIT,
    #           _VER, _VERSION, _CMD, clutilslog
    #   Writes: _TARGET_NODES (pruned to usable nodes)
    #
    # RETURNS
    #   0 if all target nodes are usable, 1 on soft errors; exits 1 when
    #   no target node is usable.
    ############################################################################
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _cspoc_verify version 1.16.7.9 + 20527,842,758"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_cspoc_verify"
            set -x
        fi
    fi
    typeset NODE 
    typeset bad_targets         #   space separated list of unreachable nodes
    typeset CAA_down_nodes      #   target hosts CAA says are down
    typeset CAA_node_name       #   CAA host node name
    typeset -i _RETCODE=0       #   Assume OK until proven otherwise
    typeset BADNODES            #   Space separated list of invalid nodes
    typeset down_ha_nodes       #   target HA nodes CAA says are down
    typeset good_targets        #   target HA nodes that should work
    typeset bad_level_nodes     #   target HA nodes below minimum release level
    if [[ $_CSPOC_CALLED_FROM_SMIT != 'true' ]]
    then
        #
        :   If not called from SMIT, which will surely set things
        :   up correctly, check to make sure target nodes are valid.
        #
        for NODE in $(IFS=, set -- $_TARGET_NODES ; print $*)
        do
            #
            :   Collect a list of given nodes that do not
            :   show up in the local cluster definition.
            #
            if [[ $_CLUSTER_NODES != @(?(*,)$NODE?(,*)) ]]
            then
                BADNODES=${BADNODES:+$BADNODES" "}$NODE
                nls_msg -2 -l ${cspoc_tmp_log} 4 44 \
                "%s: The node [%s] is not a part of this cluster.\n" "$_CMD" "$NODE"
            fi
        done
        if [[ -n $BADNODES ]]
        then
            #
            :   Remove any invalid node names from the node list
            #
            save_targets=""
            for ha_node in $(IFS=, set -- $_TARGET_NODES ; print $*)
            do
                if [[ $BADNODES != @(?(* )${ha_node}?( *)) ]]
                then
                    save_targets=${save_targets:+"${save_targets},"}${ha_node}
                fi
            done
            _TARGET_NODES=$save_targets
            if [[ -z $_TARGET_NODES ]]
            then
                nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                "%s[%d]: The command will not be run because all of the target nodes, %s, are not part of this cluster\n" "$_CMD" $LINENO "$BADNODES"
                exit 1      #   No valid nodes found
            else
                _RETCODE=1  #   Continue if 'forced' specified
            fi
        fi
    fi
    # Default the cluster version to 0 so an empty answer from clodmget
    # does not cause an arithmetic syntax error in the test below.
    cluster_version=$(clodmget -f cluster_version -n HACMPcluster)
    if [[ -x /usr/lib/cluster/incluster ]] && /usr/lib/cluster/incluster || \
       (( ${cluster_version:-0} >= 15 ))
    then
        #
        :   If at a level where CAA is in place, check to see if
        :   CAA can provide information on the state of nodes.
        #
        #   NOTE: in ksh the last stage of a pipeline runs in the current
        #   shell, so the assignments made in this while loop persist.
        LC_ALL=C lscluster -m 2>/dev/null | \
        egrep 'Node name:|State of node:' | \
        cut -f2 -d: | \
        paste -d' ' - - | \
        while read CAA_node_name state
        do
            if [[ -n $CAA_node_name ]]
            then
                if [[ $state != 'UP' && \
                    $state != @(?(* )NODE_LOCAL?( *)) && \
                    $state != @(?(* )REACHABLE THROUGH REPOS DISK ONLY?( *)) &&  \
                    $state != 'DOWN  STOPPED' ]]
                then
                    #
                    #   The purpose of this check is to avoid long timeouts
                    #   trying to talk to a node known to be dead.
                    #   - The local node is always reachable
                    #   - A stopped node may be reachable; halevel checks below
                    #   - A node reachable only through the repository disk
                    #     may be reachable: just because CAA declares the 
                    #     network to be down doesn't mean clcomd can't get 
                    #     through; hlevel checks below
                    #
                    :   Node $CAA_node_name is 'DOWN' 
                    #
                    CAA_down_nodes=${CAA_down_nodes:+"${CAA_down_nodes} "}${CAA_node_name}
                    #
                    :   Find the PowerHA node name corresponding to the
                    :   $CAA_node_name - the name must be a label on an
                    :   interface on some node.
                    #
                    host_ip=$(LC_ALL=C host $CAA_node_name | cut -f3 -d' ')
                    host_ip=${host_ip%,}
                    if [[ -n $host_ip && $host_ip == @(+([0-9.])|+([0-9:])) ]]
                    then
                        down_ha_node=$(clodmget -q "identifier = ${host_ip}" -f nodename -n HACMPadapter)
                        if [[ -n $down_ha_node ]] 
                        then
                            down_ha_nodes=${down_ha_nodes:+"$down_ha_nodes "}${down_ha_node}
                            nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                            "%s[%d]: The CAA lscluster command indicates that node %s[%s] is \"%s\" and not active.\n" "$_CMD" $LINENO $down_ha_node $CAA_node_name "$state"
                        fi
                    fi
                fi
            fi
        done
    fi
    if [[ -n $down_ha_nodes ]]
    then
        #
        :   CAA says that nodes $down_ha_nodes are not active
        :   Construct a list of the remaining nodes, to use to
        :   check to see if clcomd is running.
        #
        for ha_node in $(IFS=, set -- $_TARGET_NODES ; echo $* )
        do
            if [[ $down_ha_nodes != @(?(* )${ha_node}?( *)) ]]
            then
                good_targets=${good_targets:+"${good_targets} "}${ha_node}
            fi
        done
    else
        #
        :   CAA gives no reason to suspect nodes are not reachable
        #
        good_targets=$(IFS=, set -- $_TARGET_NODES ; echo $* )
    fi
    #
    :   CAA has not ruled out talking to node $good_targets
    #
    #   Guard the incluster invocation with the same -x test used above,
    #   so nodes without the CAA binary do not report a missing command.
    if [[ -n $_SPOC_FORCE ]] && [[ -x /usr/lib/cluster/incluster ]] && /usr/lib/cluster/incluster
    then
        #
        :   It is possible that the target node list contains names
        :   that do not correspond to CAA host names after the CAA
        :   cluster is created.  
        #   Before the CAA cluster is created, all target nodes are
        #   naturally not in a CAA cluster.  Ordinarily, this can be
        #   left to clhaver to find, though it does not distinguish
        #   between nodes it cannot connect to, and nodes that are
        #   that are not in the CAA cluster.  If the force flag was
        #   specified, and we are already in a CAA cluster, 
        :   Silently elide names in the target list that do not 
        :   correspond to CAA host names.
        #
        save_targets=$good_targets
        good_targets=""
        for given_node in $save_targets
        do
            if cl_query_hn_id -q -i $given_node >/dev/null 2>&1
            then
                good_targets=${good_targets:+"${good_targets} "}${given_node}
            else
                print "$(date) ${_CMD}._cspoc_verify[$LINENO]: Given target \"$given_node\" cannot be converted to a CAA host name.  It will be skipped." >> $clutilslog
            fi
        done
    fi 
    if [[ -n $good_targets ]]
    then
        #
        :       CAA thinks that nodes \"$good_targets\"
        :       are active.  See if clcomd can talk to them, 
        :       and what level of PowerHA is present.
        #
        clhaver -c $_VER $good_targets | \
        while IFS=: read ha_node caa_host VRMF
        do
            if [[ -z $caa_host ]]
            then
                #
                :   Add $ha_node, which clhaver cannot communicate to 
                :   through clcomd, to the list of nodes not to try 
                :   to run the command on.
                #
                down_ha_nodes=${down_ha_nodes:+"${down_ha_nodes} "}${ha_node}
            elif (( $VRMF < $_VER ))
            then
                #
                :   Add $ha_node to the list of nodes below the minimum
                :   HA release level.
                #
                bad_level_nodes=${bad_level_nodes:+"${bad_level_nodes} "}${ha_node}
            fi
        done
        if [[ -n $bad_level_nodes ]]
        then
            #
            :   Nodes \"$bad_level_nodes\" report that they are running a
            :   version of PowerHA below the required level $_VERSION
            #
            if [[ -z $_SPOC_FORCE ]]
            then
                nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                "%s[%d]: The command will not be run because the following nodes are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes"
            elif [[ $bad_level_nodes == $good_targets ]]
            then
                nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                "%s[%d]: The command will not be run because all nodes are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes"
            else
                #
                :   If force was specified, command processing continues
                :   but skips nodes \"$bad_level_nodes\"
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                "%s[%d]: The command will be run, but not on the following nodes, which are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes" 
            fi
            down_ha_nodes=${down_ha_nodes:+"${down_ha_nodes} "}${bad_level_nodes}
            _RETCODE=1
        fi 
    fi
    if [[ -n $down_ha_nodes ]]
    then
        #
        :   The nodes in \$down_ha_nodes, \"$down_ha_nodes\", are not acceptable
        :   targets for this command, either because CAA says they are down,
        :   or clcomd cannot talk to them, or they are running too far back
        :   a level of PowerHA.  Remove them from the list of C-SPOC target nodes.
        #
        save_targets=""
        for ha_node in $good_targets
        do
            if [[ $down_ha_nodes != @(?(* )${ha_node}?( *)) ]]
            then
                save_targets=${save_targets:+"${save_targets} "}${ha_node}
            fi
        done
        good_targets=$save_targets
        bad_targets=$(IFS=, set -- $down_ha_nodes ; print "$*" )
        if [[ -z $good_targets ]]
        then
            nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
            "%s[%d]: The command will not be run because all of the target nodes, %s, are not reachable\n" "$_CMD" $LINENO "$bad_targets"
            exit 1
        elif [[ -n $bad_targets ]]
        then
            if [[ -z $_SPOC_FORCE ]]
            then
                if [[ $bad_targets == @(*,*) ]]
                then
                    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                    "%s[%d]: The command will not be run because the target nodes, %s, are not reachable\n" "$_CMD" $LINENO "$bad_targets"
                else
                    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                    "%s[%d]: The command will not be run because the target node, %s, is not reachable\n" "$_CMD" $LINENO "$bad_targets"
                fi
                _RETCODE=1
            else
                if [[ $bad_targets == @(*,*) ]]
                then
                    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                    "%s[%d]: The command will be run, but not on the unreachable nodes %s\n" "$_CMD" $LINENO "$bad_targets"
                else
                    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
                    "%s[%d]: The command will be run, but not on the unreachable node %s\n" "$_CMD" $LINENO "$bad_targets"
                fi
            fi
        fi
    fi
    _TARGET_NODES=$(IFS=, set -- $good_targets ; print "$*" )
    #
    :   \$_TARGET_NODES, \"$_TARGET_NODES\", is a list of nodes that are 
    :   up, contactable by clcomd, and running a reasonably up to date
    :   level of PowerHA.
    #
    return $_RETCODE
} # End of "_cspoc_verify()"
################################################################################
#
#   Start of main, Main, MAIN
#
################################################################################
#
# Common C-SPOC initialization: set version defaults, verify the invoker has
# sufficient authority, parse the command line, determine the cluster and
# target node lists, and verify the target nodes are reachable.  Any failure
# along the way exits the script.
#
if [[ -n $_DEBUG ]]
then
    # closing ']' added to balance the opening '[' - matches the
    # "[C-SPOC Initialization Completed.]" message printed below
    print "\n[C-SPOC Initialization Started version 1.16.7.9]"
fi
_VER=${_VER:-"6100"}
_VERSION=${_VERSION:-"6.1.0.0"}
export CLUSTER_OVERRIDE="yes"   # Allow CAAC commands to run...      710
_CMD=${0##*/}                   # base name of this command, used in messages
integer TRY_RC=0
#
: since root is needed to determine node lists and what not - clgetaddr
: we may as well disable everything right here right now.  By putting
: in an explicit check we can provide a more intuitive message rather
: than something about not being able to execute some command later on.
#
if [[ $(whoami) != "root" ]] && ! ckauth PowerHASM.admin
then
    nls_msg -2 -l ${cspoc_tmp_log} 4 52 \
    "%s: All C-SPOC commands require the user to either be root, or have PowerHASM.admin authorization\n" "$_CMD"
    exit 2
fi
#
: Set a default value, unless this script is called from SMIT, in which
: case _CSPOC_MODE will already be defined.  By default, this should determine
: what the request mode type.
#
export _CSPOC_MODE=${_CSPOC_MODE:-"both"}
#
: By default, assume that we are being called from the command line
#
export _CSPOC_CALLED_FROM_SMIT=${_CSPOC_CALLED_FROM_SMIT:-"false"}
#
: Make sure that the _CMD_ARGS variable is visible everywhere
#
export _CMD_ARGS=""
[[ -n $_DEBUG ]] && print "\n[Parsing Command Line Options ... ]"
#
:   Tell clencodearg to skip the special escape processing for '='
#
if [[ $SKIP_EQ_ESC == true ]]
then
    export _ENCODE_ARGS="-e"
else
    export _ENCODE_ARGS=""
fi
# Default both option strings to "-" (no options) if the generated plan did
# not define them, then parse the command line; _getopts populates
# _CMD_ARGS, _NUM_CMD_ARGS and _NON_FLG_ARGS.
_CSPOC_OPT_STR=${_CSPOC_OPT_STR:--}
_OPT_STR=${_OPT_STR:--}
_getopts "$_CSPOC_OPT_STR" "$_OPT_STR" "$@" || exit 1
if [[ -n $_DEBUG ]]
then
    print "_CMD_ARGS=${_CMD_ARGS}"
    print "_NUM_CMD_ARGS=${_NUM_CMD_ARGS}"
    print "_NON_FLG_ARGS=${_NON_FLG_ARGS}"
    print "\n[Getting Cluster Node List ... ]"
fi
#
:   Determine the nodes in the cluster, and the nodes to which this operation
:   applies.
#
export ODMDIR=/etc/objrepos
_get_node_list || exit 1
_get_target_nodes || exit 1
if [[ -n $_DEBUG ]]
then
    print "_CLUSTER_NODES=${_CLUSTER_NODES}"
    print "\n[Verifying C-SPOC Requirements ... ]"
fi
if [[ -z $clutilslog ]]
then
   clutilslog=$(clodmget -q 'name = clutils.log' -f value -n HACMPlogs)"/clutils.log"
fi
#
:   If not all nodes are reachable, stop now, unless the "force" flag was
:   specified, implying continue despite unreachable nodes
#
_cspoc_verify || {
    [[ -z $_SPOC_FORCE ]] && exit 1
}
if [[ -n $_DEBUG ]]
then
    print "\n[C-SPOC Initialization Completed.]"
    print "DEBUG: Entering ${0##*/}"
    (( $_DEBUG >= 8 )) && set -x
fi
# Include the lvm utilities for physical volumes
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2020,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/cspoc/plans/lvm_utils.cel 1.61.1.8 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1998,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/plans/lvm_utils.cel, 726, 2147A_aha726, Feb 05 2021 09:50 PM 
#
###############################################################################
#
# _get_physical_volumes
#
# Grab the physical volume names from the command line, if any were provided.
# Also keep a record of the physical id's of those volumes from the reference
# node.
#
# Variables used:
#
#    _CMD_ARGS
#
# Variables set:
#
#    _DNAMES    -  space separated list of physical disk names
#    _EDNAMES   -  space separated list of encoded physical disk names
#    _REFNODE   -  the reference node provided by the user with -R
#
###############################################################################
function _get_physical_volumes
{
    # Pull any physical volume names, and the -R reference node, off of the
    # command line.  Sets _DNAMES/_EDNAMES (decoded/encoded disk name lists)
    # and _REFNODE, and removes the consumed tokens from _CMD_ARGS.  Exits
    # with status 1 if disks were given without a reference node.
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _get_physical_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_get_physical_volumes
	    set -x
	}
    }
    typeset DISKS
    typeset ENODE
    typeset PV
    _DNAMES=""
    _EDNAMES=""     # reset along with _DNAMES; both are appended to below,
                    # so a value left over from an earlier call would accumulate
    ENODE=""
    _REFNODE=""
    #
    :	If the -R switch was provided on the command line to identify a
    :	reference node, pick up that node name
    #
    ENODE=$(print -- $_CMD_ARGS | sed -n 's/.*\-R *\([^ ]*\).*/\1/p')
    [[ -n $ENODE ]] && {
	#
	:   Remove the -R switch, and its argument, from the command line
	:   and save away the reference node.
	#
	_CMD_ARGS=$(print -- $_CMD_ARGS | sed -e 's/\-R *[^ ]*//')
	_REFNODE=$(print -- $ENODE | cldecodearg)
    }
    #
    :	At this point, the expectation is that the command as entered ended in
    :	a list of hdisk names.  These have been collected by cl_init into
    :	_NON_FLG_ARGS so called because they are not preceded by a flag such
    :	as "-d"
    #
    DISKS=${_NON_FLG_ARGS##+([ ])}          # trim leading blanks
        #
    :	If no disks were provided, a reference node is redundant
    #
    if [[ -z $DISKS ]]
    then
	[[ -n $_REFNODE ]] && {
	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 23 "${_CMD}: No disks provided.  Ignoring -R option.\n" ${_CMD} 
	    _REFNODE=""			    # avoid processing reference node later
	}
    else
        #
	:   If disk names were given, trim them off of the string of the
	:   complete set of arguments to this command.  This is so that when
	:   they have been resolved relative to the reference node, they can
	:   just be appended back onto the string of other arguments.
        #
	_CMD_ARGS=${_CMD_ARGS%% ${DISKS}}
        #
        :   Physical volumes were provided - a reference node is required.
        :   That is, since hdisk names are not guaranteed unique across the
        :   cluster, we have to know on which node are the names meaningful.
        #
	[[ -z $_REFNODE ]] && {
	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 22 "${_CMD}: The -R switch is required when providing physical volumes.\n" ${_CMD} 
	    exit 1
	}
        #
        :   Create a space separated list of these physical disk names, in
        :   both encoded and decoded form.
        #
	for PV in $DISKS
	do
	    _EDNAMES=${_EDNAMES:+"${_EDNAMES} "}"${PV}"
	    _DNAMES=${_DNAMES:+"${_DNAMES} "}"$(print -- $PV | cldecodearg)"
	done
    fi
}
###############################################################################
#
# _verify_physical_volumes
#
# Verifies that the physical disks provided on the command line are valid
# for the volume group being operated upon.
#
# Arguments:
#
#    _VG        -  the volume group
#    _CHECK_VG  -  true if we should verify physical volumes against those that
#                  belong to the volume group.
#		-  false if we should verify that the given physical volumes
#		   belong to no volume group
#    _CHECK_ALL -  true if we should determine if the user selected all disks
#                  belonging to the volume group.
#    _NODE	-  Node (typically the reference node) on which the disk names
#		   are valid
#
# Variables used:
#
#    _DNAMES    -  the list of physical disk names
#
# Variables set:
#
#    _PVID_LIST - the list of PVID's for the provided physical disk names.
#    _EDNAMES   -  the list of encoded physical disk names on the node on
#		    which the command will be run
#    _SELECTED_ALL  - set to "true" if _DNAMES contains all the disks in the 
#		      volume group, to "false" otherwise
#    _IMPORT_PVID   - PVID to use to pick up volume group changes across the
#		      cluster
#
###############################################################################
function _verify_physical_volumes
{
    # Validate the user supplied physical volume names against the volume
    # group (or against "no volume group"), note whether the user selected
    # every disk in the volume group, and pick an _IMPORT_PVID usable for
    # propagating the volume group change to the other cluster nodes.
    # Arguments and the variables read/set are described in the header
    # comment immediately above this function.
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _verify_physical_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_verify_physical_volumes
	    set -x
	}
    }
    #
    :	Check for proper input
    #
    (( $# < 3 )) && {
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 55 "${_CMD}: wrong number of arguments\n" ${_CMD} 
	exit 1
    }
    #
    :	Check to see if any work is required here
    #
    # NOTE(review): I_NODES and CA_FLAG are globals set elsewhere (I_NODES is
    # filled in by _vg_status); empty I_NODES is taken to mean the volume
    # group is varyed on on every node - confirm against the callers.
    if [[ -z $_DNAMES && $_CSPOC_MODE == "concurrent" && -z $I_NODES ]] && 
       [[ -z $CA_FLAG ]] 
    then
	#
	:	If the list of disks provided by the user is empty, and this is
	:	a concurrent request, and the volume group is varyed on all nodes,
	:	we do not need to go any further, since we will not have to be doing
	:	any importing.  
	#
	return
    fi
    #
    :	For serial requests, or situations where a concurrent
    :	volume group is not online on all nodes, or where an explicit
    :	corrective action has been requested, an _IMPORT_PVID is still needed.
    #
    typeset _VG="$1"
    typeset _CHECK_VG="$2"
    typeset _CHECK_ALL="$3"
    typeset _NODE="$4"
    typeset _AVL_DISKS=""
    typeset _BAD_DISKS=""
    typeset _SVG
    typeset _CLNODE
    typeset PV
    typeset _D
    typeset _USE_REFNODE
    typeset _DISK=""
    typeset _disk_info=""
    #
    :	Check to see if the list of disks has to be resolved with respect to
    :	the reference node.  If no reference node is given, or if the
    :	reference node is the same as the node on which the command is going
    :	to be run, no such resolution is required.
    #
    if [[ -n $_REFNODE && $_REFNODE != $_NODE ]]
    then
	_USE_REFNODE="true"
    else
	_USE_REFNODE="false"
    fi
    if [[ $_CHECK_VG == "true" ]]
    then
	#
	:   Verify that the physical volumes belong to the volume group
	:   provided
	#
	_SVG=$_VG
    else
	#
	:   Verify that the physical volumes belong to no volume group
	#
	# lspv reports "None" for a disk in no volume group, so _SVG=None
	# makes the field comparisons below match exactly those disks
	_SVG=None
    fi
    #
    :	If the given disk names in $_DNAMES has to be interpreted with respect
    :	to a reference node, find out what names are in use there.
    #
    if [[ $_USE_REFNODE == "true" ]]
    then
	if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
	then
	    print "DEBUG: Obtaining physical volumes from _REFNODE ($_REFNODE)"
	fi
	#
	:   Get the physical volume information from the reference node
	#
	# LC_ALL=C keeps the lspv output locale independent; the command is
	# encoded (clencodearg) for transport by the C-SPOC cdsh machinery
	E_LSPV_CMD=$(print "LC_ALL=C lspv -L" | clencodearg -e)
# cel_f1 is a generated C-SPOC stub (see the head of this file): it runs
# $E_LSPV_CMD on $_REFNODE via cdsh, leaves the output in $try_out, and
# sets TRY_RC non-zero on failure.
cel_f1
	(( $TRY_RC != 0 )) && exit 1
	#
	:   Parse the output of the lspv command to find the disk names and
	:   PVIDs on that node
	#
	# each $try_out line appears to be "<node>: <disk> <pvid> <vg> ..."
	# per the cdsh output format - the node label is ignored here
	while read _out_node _out_disk _out_pvid _out_vg _out_rest ; do
	    if [[ $_out_vg == $_SVG && $_out_pvid != [Nn]one ]]
	    then
		#
		:   Add $_out_disk to the list of disks in that volume group
		:   on that node
		#
		_AVL_DISKS=${_AVL_DISKS:+"${_AVL_DISKS} "}$_out_disk
	    fi
	    # @(?(* )X?( *)) is a whole-word membership test against a
	    # space separated list
	    if [[ $_DNAMES == @(?(* )$_out_disk?( *)) ]]
	    then
		#
		:   Add $_out_disk to the list of PVIDs for the disks provided by 
		:   the user.  This will be used for getting the physical volume 
		:   names on the node where the command will actually be run, if 
		:   it is different from the reference node.
		#
		_PVID_LIST=${_PVID_LIST:+"${_PVID_LIST} "}$_out_pvid
	    fi
	done < $try_out
	#
	:   Save a pointer to the lspv output on this node so that we can get
	:   to it later if we have to.
	#
	_disk_info=$try_out
    fi	
    #
    :	Now, get the list of physical volumes from the node on which we will
    :	run the command.
    #
    if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
    then
	print "DEBUG: Obtaining physical volumes on node $_NODE"
    fi
    TRY_RC=0
    E_LSPV_CMD=$(print "LC_ALL=C lspv -L" | clencodearg -e)
# cel_f2: like cel_f1 but runs the encoded lspv on $_NODE; it re-points
# $try_out at a new temporary file (cel_s2.out)
cel_f2
    (( $TRY_RC != 0 )) && exit 1
    #
    :	If we did not have to use the reference node, then set the list of
    :	available volumes here, as well as the list of physical volume ids
    #
    if [[ $_USE_REFNODE == "false" ]]
    then
	#
	:   Parse the output of the lspv command to find the disk names and
	:   PVIDs on that node
	#
	while read _out_node _out_disk _out_pvid _out_vg _out_rest ; do
	    #
	    :	Create a list of the names of the disks in that volume group
	    :	on that node
	    #
	    if [[ $_out_vg == $_SVG && $_out_pvid != [Nn]one ]]
	    then
		_AVL_DISKS=${_AVL_DISKS:+"${_AVL_DISKS} "}$_out_disk
	    fi
	    #
	    :   Create a list of PVIDs for the disks provided by the user.  This
	    :   will be used for getting the physical volume names on the node
	    :   where the command will actually be run, if it is different from
	    :   the reference node.
	    #
	    if [[ $_DNAMES == @(?(* )$_out_disk?( *)) ]]
	    then
		_PVID_LIST=${_PVID_LIST:+"${_PVID_LIST} "}$_out_pvid
	    fi
	done < $try_out
	#
	:   If we are going to use the second set of lspv output, set the
	:   pointer to it, since the temp file will have a different name.
	#
	_disk_info="$try_out"
    fi
    #
    :	If we were not called from SMIT, verify that all the disks passed on
    :	the command line are valid.  SMIT only shows those disks that are
    :	valid, so the user can not provide any that are bad.
    #
    if [[ $_CSPOC_CALLED_FROM_SMIT == "false" ]]
    then
	# 
	:   Collect the names of the given disks that do not show up on the
	:   target node
	#
	for PV in $_DNAMES
	do
	    [[ $_AVL_DISKS != @(?(* )$PV?( *)) ]] && \
		_BAD_DISKS=${_BAD_DISKS:+"${_BAD_DISKS} "}${PV}
	done
	[[ -n $_BAD_DISKS ]] && {
	    if [[ $_SVG == "None" ]] ; then
		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 20 "${_CMD}: Physical volumes ($_BAD_DISKS) are invalid on node $_REFNODE\n" ${_CMD} $_BAD_DISKS $_REFNODE 
	    else
		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 21 "${_CMD}: Physical volumes ($_BAD_DISKS) are not allocated to volume group $VG\n" ${_CMD} $_BAD_DISKS $VG 
	    fi
	    exit 1
	}
    fi
    # 
    :	Determine the physical volume id to use for importing changes on other
    :	nodes in the cluster.  We want a disk that will be in the volume group
    :	when the operation is complete, and, preferrably, one that was in the
    :	volume group before the operation, too.
    # 
    if [[ $_CHECK_ALL == "true" ]]
    then
	#
	:   Check to see if the user listed all physical volumes in the volume
	:   group
	#
	if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
	then
	    print "DEBUG: Checking for all disks on command line"
	fi
	#
	:   Check to see if there is any disk in the volume group that was not
	:   in the given list of disks
	#
	for _D in $_AVL_DISKS
	do
	    [[ $_DNAMES != @(?(* )$_D?( *)) ]] && {
		_DISK="$_D"
		break
	    }
	done
	#
	:   The expected use of _CHECK_ALL == true is for operations like
	:   reducevg and unmirrorvg that remove disks from the volume group.
	:
	:    + if all disks have been selected, none can be used for
	:      importvg, and the _SELECTED_ALL flag will indicate this.
	:
	:    + if not all disks have been selected, _DISK will contain that
	:      was not selected, and can be used for importvg once the
	:      reducevg is done
	#
	if [[ -z $_DISK ]]
	then
	    _SELECTED_ALL="true"
	else
	    _SELECTED_ALL="false"
	    # grep the saved lspv output for the unselected disk; the read
	    # in the last pipeline stage extracts its PVID (third field)
            _IMPORT_PVID=$(grep -w $_DISK $_disk_info | read node disk pvid rest ; print $pvid)
	fi
    else
        #
        :   It was not expected that all disks in the volume group could be
        :   listed - this is not the reducevg case.  Pick an existing disk in
	:   the volume group.  
        #
	_IMPORT_PVID=$(grep -w $_VG $_disk_info | grep -v [Nn]one | read node disk pvid rest ; print $pvid)
	if [[ -z $_IMPORT_PVID && -n $_DNAMES ]]
	then
	    #
	    :	If we had not found any disks in the volume group - which could
	    :	happen on importing a new volume group - pick one of the given
	    :	disks.
	    #
            print $_DNAMES | read _DISK rest
            # only accept the value as a PVID if it is purely hex digits
            _IMPORT_PVID=$(grep -w $_DISK $_disk_info | \
                           read node disk pvid rest
                           if [[ $pvid == +([[:xdigit:]]) ]]
                           then
                               print $pvid
                           fi)
        fi
    fi
    #
    :	If no disks were provided, we do not need to go any further
    #
    # NOTE(review): returning here leaves the $_disk_info temp file in
    # place; it is only removed on the path below - confirm whether that
    # is intended.
    [[ -z $_DNAMES ]] && 
	return
    rm -f $_disk_info
    #
    :	If we have a PVID_LIST and a reference node, we need to translate 
    :	the physical ids into physical names on the reference node.
    #
    if [[ -n $_PVID_LIST && $_USE_REFNODE == "true" ]] 
    then
	[[ -n $_DEBUG ]] && print "DEBUG: Translating PVID_LIST"
	_EDNAMES=""
	# NOTE(review): $try_out at this point is the cel_f2 output, i.e.
	# the lspv listing from the node the command will run on, so the
	# PVIDs gathered on the reference node are mapped to that node's
	# disk names; distinct from $_disk_info removed above.
        for _P in $_PVID_LIST
        do
	    _DISK=$(grep -w $_P $try_out | read node disk rest ; print $disk)
	    if [[ -n $_DISK ]]
	    then
		#
		:   Here build the encoded list of disk names for the command
		#
		_EDNAMES=${_EDNAMES:+"${_EDNAMES} "}$(print -- $_DISK | clencodearg)
	    fi
        done
    fi
}
###############################################################################
#
# _verify_replicated_volumes
#
# Verifies that the volume group, being operated on, is in a Replicated Resource Group.
#
# Arguments:
#
#    _VG            -  the volume group
#    _CLNODES       -  The nodes where the volume groups would be imported
#    _ACTIVE_NODE   -  the reference node where the VG is varied on
#
###############################################################################
function _verify_replicated_volumes
{
    # Ensure that, when $_VG is part of a replicated resource (PPRC, SVC
    # PPRC, eRCMF or EMC SRDF), a C-SPOC change made here will also be known
    # at the remote site.  Returns 0 when the operation may proceed; exits 1
    # (after a message) when the replication pair state forbids it.
    #
    # NOTE(review): the header comment above documents _CLNODES as argument
    # 2 and _ACTIVE_NODE as argument 3, but the code assigns them the other
    # way around - confirm the callers pass (VG, active_node, node_list).
    if [[ -n "$_DEBUG" ]] 
    then
	print "DEBUG: Entering _verify_replicated_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_verify_replicated_volumes
	    set -x
	}
    fi
    (( $# != 3 )) && {
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 55 "${_CMD}: wrong number of arguments\n" ${_CMD} 
	exit 1
    }
    typeset _VG="$1"
    typeset _ACTIVE_NODE="$2"
    typeset _CLNODES="$3"
    typeset _REP_VOL
    typeset _PPRC_REPRESOURCE
    typeset _ERCMF_REPRESOURCE
    typeset _SVCPPRC_REPRESOURCE
    typeset _SR_REPRESOURCE
    #
    :	Clear the resource type selection.  These are left global, as in the
    :	original code, but must not carry a stale value from an earlier
    :	invocation, which would wrongly trigger the pair state verification
    :	at the bottom of this function.
    #
    res_type=""
    verify_cmd=""
    #
    :	Verify that this Volume Group contains Replicated Volumes.
    :	This is to verify that the changes made at one site will be propagated
    :	to the remote DASD. CSPOC operations will not be allowed if the changes 
    :	will not be known at the remote site.
    #
    # Replicated Volume Types:
    #       IBM PPRC 
    #       IBM GeoMirror
    #       IBM eRCMF
    #       IBM SVC PPRC
    #       EMC SRDF®
    #
    # IBM PPRC  Replicated Volumes 
    # 
    export ODMDIR=/etc/objrepos
    _REP_VOL=$(/usr/es/sbin/cluster/utilities/clodmget -q value="$_VG" -f group -n HACMPresource)
    if [[ -z $_REP_VOL ]]
    then
	#
	:   A volume group that is not in a resource group is not a replicated resource
	#
	return
    fi
    #
    :	Query each supported replication resource attribute of the owning
    :	resource group.
    #
    _PPRC_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f PPRC_REP_RESOURCE -n HACMPresource)
    _ERCMF_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f ERCMF_REP_RESOURCE -n HACMPresource)
    _SVCPPRC_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f SVCPPRC_REP_RESOURCE -n HACMPresource)
    _SR_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f SR_REP_RESOURCE -n HACMPresource)
    if [[ -z "$_PPRC_REPRESOURCE" && -z "$_ERCMF_REPRESOURCE" && -z "$_SVCPPRC_REPRESOURCE" && -z "$_SR_REPRESOURCE" ]]
    then
       #
       : This VG is not a PPRC Replicated Resource of any of the supported types
       #
       return 0
    fi
    #
    :	Verify that the cluster is active on the node with 
    :	the Volume Group varied on $_ACTIVE_NODE.
    #
    E_ACTIVE_NODE=$(print $_ACTIVE_NODE | clencodearg)
# cel_f3 is a generated C-SPOC stub (see the head of this file) that runs
# clgetactivenodes for $E_ACTIVE_NODE and sets $cel_rc.
cel_f3
    if (( $cel_rc >= 1 )) 
    then
	#
	:   Cluster is active on node with $_VG varied on. Allow CSPOC operations.
	:   Lazy update will enable the changes to be made at the remote
	:   site after failover
	#
	return 0
    fi
    #
    :	The cluster is not active on the node with $_VG varied on.   Further
    :	processing depends on the resource type
    #
    if [[ -n "$_PPRC_REPRESOURCE" ]]
    then
	#
	: This is a PPRC Replicated Resource
	: Verify that the cluster is active on the node with 
	: the Volume Group varied on $_ACTIVE_NODE
	#
	res_type="PPRC"
	verify_cmd=/usr/es/sbin/cluster/pprc/utils/cl_verify_pprc_cspoc
    fi   
    if [[ -n "$_SVCPPRC_REPRESOURCE" ]]
    then
	#
	:   This is an SVC PPRC Replicated Resource
	:   Verify that the cluster is active on the node with 
	:   the Volume Group varied on $_ACTIVE_NODE
	#
	res_type="SVC PPRC"
	verify_cmd=/usr/es/sbin/cluster/svcpprc/utils/cl_verify_svcpprc_cspoc
    fi   
    if [[ -n "$_SR_REPRESOURCE" ]]
    then
	#
	:   This is an EMC SRDF® Replicated Resource
	#
	res_type="EMC SRDF®"
	verify_cmd=/usr/es/sbin/cluster/sr/utils/cl_verify_sr_cspoc
    fi   
    # NOTE(review): an eRCMF-only resource (_ERCMF_REPRESOURCE set) has no
    # verify command branch; it falls through and returns 0 - confirm that
    # is intended.
    if [[ -n $res_type ]]
    then
	#
	:   The Cluster is not active on the node with vg varied on. If the pprc
	:   pair is not in a full-duplex state, changes made on this node may not
	:   be known at the remote ODM. Verify that the CSPOC operations will be 
	:   run on nodes that are on the same site. CSPOC operations should 
	:   succeed in this case.
	#
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 60 "WARNING: $_VG is a $res_type Replicated Resource.\n"  $_VG "$res_type"
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 61 "	 Since the cluster is NOT active on node $_ACTIVE_NODE with $_VG active,\n" $_ACTIVE_NODE $_VG
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 62 "	 the CSPOC operation may not succeed on the remote peers.\n"
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 63 "Verifying $res_type pair state ...\n" "$res_type"
	#
	:   Convert the comma separated list of nodes in _CLNODES 
	:   into space separated list SP_CLNODES
	#
	SP_CLNODES=$(IFS=, set -- $_CLNODES ; print $* )
	if ! $verify_cmd $_VG $_ACTIVE_NODE $SP_CLNODES
	then
	    nls_msg -2 -l $cspoc_tmp_log ${_MSET} 9999 "The state of the $res_type pair does not allow the CSPOC operation at this time.\n" $res_type
	    exit 1
	else
	    return 0
	fi 
    fi
}
###############################################################################
#
#
#   Name:	_lv_status
#
#
#   Input:	1. flag for clgetvg - either "-l" or "-f"
#		2. corresponding value, either logical volume or file system,
#		   in encoded form
#
#		Variables used by this function
#
#		_SPOC_FORCE - force flag set
#		_TARGET_NODES - list of nodes from command line on which this
#		    is to be made
#
#
#   Function:	Call clgetvg on each node on which the logical volume
#		operations is going to perform until one reports back the name
#		of the owning volume group.   That gets passed through to
#		_vg_status, which indicates the state of the volume group on
#		each node.
#
#		Note that it is assumed that the logical volume is known on at
#		least one of the nodes in _TARGET_NODES; it is an error for
#		this routine to be invoked with a completely unknown logical
#		volume.
#
#
#   Output:	Variables set by this function
#
#		VG  - encoded name of the owning volume group
#		DVG - decoded (readable) name of the owning volume group
#
#		Note that these have to have been defined by the caller in
#		order for the caller to pick up these values.
#
#
#   Returns:    Normally returns to caller with output variables set
#               On error, will exit with a message; does not return to caller
#
#
################################################################################
function _lv_status
{
    # Locate the volume group that owns the logical volume ('-l') or file
    # system ('-f') named by the arguments, preferring a cheap local lookup,
    # then call _vg_status to establish where that volume group is active.
    # Sets VG (encoded) and DVG (decoded); exits 1 if no node knows the LV.
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _lv_status for $1 $(print $2 | cldecodearg) version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_lv_status
	    set -x
	}
    }
    integer TRY_RC=1
    # option and parameter are deliberately global: the generated cel_f4
    # stub (see the head of this file) references them when it runs
    # "clgetvg $option $parameter" on each target node
    option=$1			    # either '-l' or '-f'
    parameter=$2		    # either logical volume or file system name
    #
    :	Since its only necessary to find the owning volume group once - LV
    :	names assumed to be unique across the cluster - check to see if the
    :	local node is one of the ones that should know about it.  Local tests
    :	are faster
    #
    LOCAL_NODE=$(get_local_nodename)
    # whole-word membership test against the comma separated target list
    if [[ $_TARGET_NODES == @(?(*,)$LOCAL_NODE?(,*)) ]] 
    then
	uu_parm=$(print $parameter | cldecodearg)
	DVG=$(clgetvg $option $uu_parm 2>/dev/null)	# supress any 'not found' msg
	TRY_RC=$?
    fi
    #
    :	If not successfully found locally, look across the rest of the cluster
    #
    if (( $TRY_RC != 0 )) || [[ -z $DVG ]]
    then
	#
	:   Find which VG contains the LV, asking each of the nodes in turn, if
	:   necessary
	#
cel_f4
	read A DVG < $try_out		    # decoded (readable) volume group name
	rm -f $try_out			    # otherwise next call just appends
    fi
    (( $TRY_RC != 0 )) &&		    # No node knows of this logical volume
	exit 1
    VG="$(print $DVG | clencodearg)"	    # encoded volume group name
    #
    :       Determine the activation status of the volume group across the
    :       cluster.  This tells us where to run the command.
    #
    _vg_status
}					    # end _lv_status
###############################################################################
#
#
#   Name:	_vg_status
#
#
#   Input:	Variables used by this function
#
#		VG - volume group name, encoded
#		DVG - volume group name, decoded
#		_SPOC_FORCE - force flag set
#		_TARGET_NODES - list of nodes from command line on which this
#		    is to be made
#
#
#   Function:	Call clresactive on each node on which the volume group
#		operation is going to be performed.  This will pass back
#		status from lsvg.  Provide in CL_NODE a choice for the node to
#		run a command against this volume group.
#
#
#               "-u"    A volume group not known on any node is not an error
#
#
#   Output:	Variables set by this function
#
#		CL_NODE - node on which to run operation against this volume
#			  group
#		_CSPOC_MODE - if set to "evaluate", and the volume group mode
#			  can be determined from the current activation
#			  state or defintions, set to "concurrent" or "shared"
#               VG_ACTIVE - flag indicating type of activation done on CL_NODE
#                           "S" - was already active 
#                           "I" - was originally inactive
#                           "P" - was originally in passive mode
#                           "C" - was selected from the concurrent list
#
#		The following space separated lists:
#
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode (varyonvg -c)
#			  If VG_ACTIVE == C, then CL_NODE is also in this list
#		S_NODES - nodes on which the volume group is vary'd on in serial
#			  mode (varyonvg)
#			  If VG_ACTIVE == S, then CL_NODE is also in this list
#		I_NODES - nodes on which the volume group is inactive (varyoffvg)
#			  If VG_ACTIVE == I, then CL_NODE was removed from this
#			  list
#		P_NODES - nodes on which the volume group is vary'd on in passive
#			  mode (varyonvg -c -P)
#			  If VG_ACTIVE == P, then CL_NODE was removed from this
#			  list
#		O_NODES - nodes on which the volume group is unknown
#		G_NODES - nodes on which the volume group is known
#
#
#   Returns:    Normally returns to caller with output variables set
#               On error, will exit with a message; does not return to caller
#
#
################################################################################
function _vg_status
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_status version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_vg_status
	    set -x
	}
    }
    #
    :   Pick up any passed options
    #
    u_flag=""
    while getopts ":u" option ; do
        case $option in
            u )
                u_flag="true"
                ;;
            * )
		shift $((OPTIND - 1))
                dspmsg scripts.cat 6555 "Option \"$option\" is not valid\n" "$option"
                return 1
                ;;
        esac
    done
    # 
    :	Check all the nodes relevant to this operation, to see what the current
    :	state of the volume group is on those nodes.
    # 
    integer TRY_RC=0
cel_f5
    (( $TRY_RC != 0 )) && 
	exit 1
    #
    :	Collect that state into local variables from the file $try_out, where
    :	it was collected from running clresactive on each node.  The format of
    :	the file is:
    :	"node_name: <status>"
    #
    C_NODES=""                          # clean out leftover values
    S_NODES=""
    I_NODES=""
    P_NODES=""
    O_NODES=""
    G_NODES=""
    while read node status rest ; do	# parse the line of $try_out
	case $status in			# note the status
	    concurrent )
		    type=C
		;;
	    active )
		    type=S
		;;
	    inactive )
		    type=I
		;;
	    passive )
		    type=P
		;;
	    no | * )
		    type=O
		;;
	esac
	#
	:   Add the node name minus the trailing ':' to the appropriate
	:   list:
	:	C_NODES - varyed on in concurrent mode - varyonvg -c
	:		  note that active mode 'varyonvg -c -A' also shows up
	:		  as 'concurrent'
	:	S_NODES - varyed on in normal mode - varyonvg
	:	I_NODES - not varyed on at all - varyoffvg
	:	P_NODES - varyed on in passive mode - varyonvg -c -P
	:	O_NODES - not known on that node - exportvg
	#
	eval ${type}_NODES=\${${type}_NODES:+\$${type}_NODES" "}${node%:}
	if [[ $type != O ]]		# status is not 'unknown'
	then
	    #
	    :	Additionally, keep a list of nodes on which the volume group
	    :	is at least defined, independent of its current state.
	    #
	    G_NODES=${G_NODES:+$G_NODES" "}${node%:}
	fi
    done < $try_out			# line at a time into the read statement
    rm -f $try_out				# otherwise next call just appends
    [[ -n $_DEBUG ]] && (( $_DEBUG >  4 )) && {
        print "DEBUG: Status of the volume group $DVG across nodes $_TARGET_NODES"
        print "DEBUG:   Concurrent = $C_NODES"
        print "DEBUG:   Active = $S_NODES"
        print "DEBUG:   Inactive = $I_NODES"
        print "DEBUG:   Passive = $P_NODES"
        print "DEBUG:   volume group is unknown = $O_NODES"
        print "DEBUG:   volume group is known = $G_NODES"
    }
    #
    :	Some C-SPOC commands work on both concurrent and shared volume groups.
    :	The intent is either flagged through SMIT, or must be determined
    :	dynamically.  If a dynamic determination has not yet been made, see if
    :	we can do so now, based on the known activation status.
    #
    if [[ -z $_CSPOC_MODE || $_CSPOC_MODE == "evaluate" ]] 
    then
	if [[ -n $C_NODES ]]
	then
	    #
	    :   Varyed on in concurrent mode
	    #
	    _CSPOC_MODE="concurrent"
	elif (( 1 < $(print $S_NODES | wc -w) ))
	then
	    #
	    :	Implicitly on in RAID concurrent mode on more than one node
	    #
	    _CSPOC_MODE="concurrent"
	elif [[ -n $S_NODES ]]
	then
	    #
	    :	Ordinary vary on at most one node
	    #
	    _CSPOC_MODE="shared"
	elif [[ -n $P_NODES ]]
	then
	    #
	    :	Passive vary on implies a shared resource
	    #
	    _CSPOC_MODE="shared"
	#
	:   We could not determine the mode from the activation state.  This
	:   would be the case when the volume group was varyed off cluster
	:   wide.  So, check the local ODM to see how its used.  The
	:   correctness of this operation depends on there being no
	:   unsynchronized changes across the cluster.
	#
	elif [[ -n $(odmget "-q name = CONCURRENT_VOLUME_GROUP and value = $DVG" HACMPresource) ]]
	then
	    #
	    :	Used as a concurrent volume group
	    #
	    _CSPOC_MODE="concurrent"
	elif [[ -n $(odmget "-q name = VOLUME_GROUP and value = $DVG" HACMPresource) ]]
	then
	    #
	    :	Used as a shared volume group
	    #
	    _CSPOC_MODE="shared"
	else
	    #
	    :   The volume group is not varied on anywhere, and not in a
	    :   resource group.  Assume shared, since that will work once
	    :   the volume group is varied on.
	    #
	    _CSPOC_MODE="shared"
	fi
    fi					    # end set _CSPOC_MODE
    #
    :	Correction for fast disk takeover
    #
    if [[ $_CSPOC_MODE == "concurrent" ]] && 
       (( 1 == $(print $C_NODES | wc -l) )) &&
       [[ -n $(odmget "-q name = VOLUME_GROUP and value = $DVG" HACMPresource) ]]
    then
	#
	:   An enhanced concurrent volume group used in active/passive
	:   mode for fast disk takeover will show up as being in
	:   concurrent mode on at most one node, but will be listed as
	:   a shared VOLUME_GROUP in HACMPresources
	#
	_CSPOC_MODE="shared"
    fi
    #
    :	Having found the status of the volume group across the cluster, pick
    :	a node that would be most appropriate to run the LVM or file system
    :	command of interest on.  Preferentially pick the local node if
    :	possible, otherwise just pick the first available.
    :
    :	At the end of this processing:
    :	    CL_NODE has the name of the node to use
    :	    VG_ACTIVE has an indication of the volume group currnet state
    #
    LOCAL_NODE=$(get_local_nodename)	    #	find out the local node name
    CL_NODE=""                              #   clean out any left over value
    if [[ -n $C_NODES ]] ; then		    #	in concurrent mode on some nodes
	#
	:   If the volume group is already varyed on in concurrent mode, pick
	:   a node from that list - preferentially, the local node - on which
	:   to run the command.
	#
	if [[ $C_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
	    CL_NODE=$LOCAL_NODE		    #	Use the local node
	else				    #	Local node is not a possibility 
	    echo $C_NODES | read CL_NODE rest #	So just use the first
	fi
	VG_ACTIVE="C"			    #	Picked from concurrent list
    elif [[ -n $S_NODES ]] ; then	    #	In shared mode on some nodes
	#
	:   The volume group can be active - ordinary varyonvg - on one or more
	:   nodes.  One node is the shared volume group case, multiple nodes
	:   would be expected in RAID concurrent mode.  Pick a node from that
	:   list - preferentially, the local node - on which to run the
	:   command.
	:   
	:   Note- it is up to the caller to decide if the operation should be
	:   allowed to proceed if the volume group is used in RAID concurrent
	:   mode on more than one node.  List operations will work, change,
	:   delete or create will not.
	#
	if [[ $S_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
	    CL_NODE=$LOCAL_NODE		    #	Use the local node
	else				    #	Local node is not a possibility 
	    echo $S_NODES | read CL_NODE rest #	So just use the first
	fi
	VG_ACTIVE="S"			    #	Used an active node
    else				    #	Not active anywhere
	#
	:   Since the volume group is currently varyed off, pick a node from
	:   the 'passive' or 'inactive' lists to vary it on.  Preferentially
	:   pick the local node.  The selected node is removed from the list,
	:   so that they remain accurate.
	#
	if [[ -n $P_NODES ]] ; then	    #	Look for passive nodes
	    if [[ $P_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
		CL_NODE=$LOCAL_NODE	    #	Use the local node
		P_NODES=$(echo $P_NODES | tr ' ' '\n' | grep -vw $LOCAL_NODE)
	    else			    #	Local node is not a possibility
		echo $P_NODES | read CL_NODE P_NODES  #	So just use the first
	    fi
	    VG_ACTIVE="P"		    #	Picked from passive list
	elif [[ -n $I_NODES ]] ; then	    #	Look for inactive nodes
	    if [[ $I_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
		CL_NODE=$LOCAL_NODE	    #	Use the local node
		I_NODES=$(echo $I_NODES | tr ' ' '\n' | grep -vw $LOCAL_NODE)
	    else			    #	Local node is not a possibility
		echo $I_NODES | read CL_NODE I_NODES  #	So just use the first
	    fi
	    VG_ACTIVE="I"		    #	Picked from inactive list
	else
            #
            :   For some operations, like importvg, its valid to have a volume
            :   group that is not currently known on any node.  In this case,
            :   the reference node must be valid.
            #
            if [[ $u_flag == "true" ]] ; then    #   unknown volume groups allowed
                VG_ACTIVE="O"                   #   And this is one of them 
                CL_NODE=$_REFNODE               #   the disks should be known here
            else
		#
		:   If for some reason it was not possible to find a node on which
		:   the volume group is or could be brought on line, the operation
		:   stops here.
		#
		nls_msg -2 -l $cspoc_tmp_log 24 6 "no node has access to $DVG\n" $DVG 
		exit 1
	    fi
	fi
    fi					    #	end by volume group state
}					    #	end _vg_status
################################################################################
#
#
#   Name:	_vg_active
#
#   Input:	Variables used by this function
#
#		VG - volume group name, encoded
#		DVG - volume group name, decoded
#		_SPOC_FORCE - force flag set
#		S_NODES - nodes on which the volume group is vary'd on in serial
#			  mode (varyonvg)
#		I_NODES - nodes on which the volume group is inactive (varyoffvg)
#		P_NODES - nodes on which the volume group is vary'd on in passive
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode
#		CL_NODE - node on which the volume group is active or to be
#			  activated
#		VG_ACTIVE - flag indicating type of activation done on CL_NODE
#			    "S" - was already active 
#			    "I" - was originally inactive
#			    "P" - was originally in passive mode
#			    "C" - was selected from the concurrent list
#		_DNAMES - list of physical disk names provided by
#			  _get_physical_volumes for those commands that use them
#
#		"-r"	- Volume group will be used for read/only operations
#			  only (e.g., display) and an _IMPORT_PVID is
#			  unnecessary
#		"-p"	- volume group need only be in passive mode for desired
#			  use
#               "-R"    - Returns 1 to the caller on failure to activate vg
#                         rather than exiting. In such a case CL_NODE is added
#                         I_NODES and CL_NODE is emptied.
#
#
#   Function:	Ensure that the volume group is active on one of the nodes
#		with the intent of being able to run a command there; 
#		activate it if it is not currently active and force was
#		specified.  The volume group is activated on CL_NODE, as set
#		by _vg_status.
#
#
#   Output:	Variables set by this function
#
#		EPVID - encoded PVID which can be used for importvg
#
#
#   Returns:	Normally returns to caller with output variables set
#		On error, will exit with a message; does not return to caller
#
#
################################################################################
function _vg_active
{
    # Ensure that volume group $DVG is active on CL_NODE (as chosen by
    # _vg_status) so that an LVM or file system command can be run there;
    # vary it on if it is not currently active.
    #
    # Options:
    #	-r  volume group is used read only - no _IMPORT_PVID/EPVID needed
    #	-R  on failure to activate, return 1 to the caller (CL_NODE is moved
    #	    to I_NODES and emptied) rather than exiting
    #	-p  the volume group need only be in passive mode for the desired use
    #
    # Sets:  EPVID - encoded PVID which can be used for a later importvg
    #
    # NOTE: r_flag and passive_only_flag are deliberately NOT typeset local;
    # _vg_sync reads passive_only_flag afterwards to decide how to release
    # the volume group.
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_active version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    set -x
	    typeset PROGNAME=_vg_active
	}
    }
    #
    :   Pick up any passed options
    #
    r_flag=""
    passive_only_flag=""
    typeset R_flag=""
    while getopts ":rRp" option ; do
        case $option in
            r )
                r_flag="true"
                ;;
            R )
                R_flag="true"
                ;;
	    p )
		passive_only_flag="true"
		;;
            * )
                shift $((OPTIND - 1))
                dspmsg scripts.cat 6555 "Option \"$option\" is not valid\n" "$option"
                return 1
                ;;
        esac
    done
    if [[ -z $CL_NODE ]] ; then
	#
	:   If for some reason it was not possible to find a node on which the
	:   volume group is or could be brought on line, the operation stops
	:   here.  CL_NODE was set to $CL_NODE in _vg_status
	#
	nls_msg -2 -l $cspoc_tmp_log 24 6 "no node has access to $DVG\n" $DVG 
        [[ -z $R_flag ]] && exit 1
	return 1
    fi
    #
    # Fix: the "inactive + passive only" case must be tested first.  The
    # previous code tested plain "VG_ACTIVE == I" in the general varyon
    # branch, which made the cl_pvo (passive mode varyon) branch below it
    # unreachable.
    #
    if [[ $VG_ACTIVE == I && $passive_only_flag == "true" ]]
    then
	#
	:   If the volume group needs be brought online in passive mode
	:   only, invoke cl_pvo to do so
	#
	TRY_RC=0
cel_f7
        (( $TRY_RC != 0 )) && {
            [[ -z $R_flag ]] && exit 1
            I_NODES="$CL_NODE $I_NODES"
            CL_NODE=""
            return 1
	}
    elif [[ $VG_ACTIVE == I || ( $VG_ACTIVE == P && -z $passive_only_flag ) ]] ; then
	#
	:   If the volume group needs to be brought on line, do so on the
	:   selected node.  clvaryonvg will do the appropriate kind of varyon.
	#
	TRY_RC=0
cel_f6
        (( $TRY_RC != 0 )) && {
            [[ -z $R_flag ]] && exit 1
            I_NODES="$CL_NODE $I_NODES"
            CL_NODE=""
            return 1
        }                                   #   varyon failed
    fi					    #	end varyon
    [[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && 
	print "DEBUG: CL_NODE = $CL_NODE"
    #
    :	If there are inactive nodes whose information will have to be updated,
    :	get a useful PVID.
    #
    # NOTE(review): the test below reads $_REMOVED_VG, while _vg_sync and the
    # function headers use $_REMOVED_VG_ (trailing underscore) - confirm
    # which spelling is intended.
    if [[ -z $EPVID && -z $r_flag && -z $passive_only_flag && ( -z $_REMOVED_VG || $_REMOVED_VG == "true" ) ]] 
    then
	#
	:   Check the disks, and find a PVID we can use for importvg later, if
	:   it has not been done in a prior check.
	#
	if [[ -z $_IMPORT_PVID ]]
	then
	    _verify_physical_volumes $DVG false false $CL_NODE
	    [[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && {
		print "DEBUG: _IMPORT_PVID = $_IMPORT_PVID"
	    }
	fi
	EPVID=$(echo $_IMPORT_PVID | clencodearg)
    fi
}					    #	end _vg_active
##############################################################################
#
#
#   Name:       _vg_sync
#
#   Input:	function request
#		    sync - synchronize
#		    release - restore volume group to original state
#		If no function request is passed, both functions are performed
#
#		chvg command
#		    lists a chvg command to be run on all nodes which must do
#		    an importvg -L, for those functions not picked up by
#		    importvg -L
#		
#		    This is valid only if the 'sync' function request is
#		    specified
#
#               Variables used by this function
#
#               VG - volume group name, encoded
#               DVG - volume group name, decoded
#               CL_NODE - node on which the volume group is active
#               I_NODES - nodes on which the volume group is inactive
#			  (varyoffvg)
#               P_NODES - nodes on which the volume group is vary'd on in
#			  passive mode (varyonvg -c -P)
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode
#               VG_ACTIVE - flag indicating type of activation done on CL_NODE
#                           "S" - was already active 
#                           "I" - was originally inactive
#                           "P" - was originally in passive mode
#			    "C" - was in concurrent mode
#		EPVID - encoded PVID of a disk in the volume group to use for 
#			importvg -L
#		_IMPORT_PVID - decoded PVID of a disk in the volume group to
#			use for importvg -L
#		_REMOVED_VG_ - operation resulted in the deletion of the
#			       volume group
#
#
#   Function:   Synchronize the updated volume group information across the 
#               cluster.   This ensures that the ODM information on each node
#               on which the volume group is defined actually matches what's
#               out on the disks, as modified by the operation.
#
#
#   Output:     None
#
#
#   Returns:    Normally returns to caller 
#               On error, will exit with a message; does not return to caller
#
#
##############################################################################
function _vg_sync
{
    # Synchronize the updated volume group information across the cluster
    # ("sync" phase), and/or restore the volume group to its original
    # activation state ("release" phase).  With no request argument, both
    # phases are performed.
    #
    # $1 - optional function request: "sync" or "release"
    # $2 - optional chvg command to be run along with "importvg -L" on the
    #      inactive nodes (meaningful only for the "sync" phase)
    #
    # Relies on globals set by _vg_status/_vg_active: VG, DVG, CL_NODE,
    # I_NODES, P_NODES, C_NODES, S_NODES, VG_ACTIVE, EPVID, _IMPORT_PVID,
    # passive_only_flag and _REMOVED_VG_.
    #
    # The column-0 cel_fN calls are C-SPOC generated stubs that run the
    # distributed commands; presumably each one sets TRY_RC from the remote
    # execution - confirm against the generated plan.
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_sync version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_vg_sync
	    set -x
	}
    }
        request=$1			    #	function requested by caller
    chvg_cmd=$2			    #	Any chvg command that has to be run
    integer TRY_RC=0		    #	return code from CSPOC operations
    integer SAVE_RC=0		    #	error did not stop operations
    _REMOVED_VG_=${_REMOVED_VG_:="false"}   #	operation deleted volume group
    if [[ -z $request || $request == "sync" ]] &&
       [[ $_REMOVED_VG_ == "false" ]] ; then
	if [[ -n $I_NODES && -n $EPVID ]] ; then
	    #
	    :   There are nodes on which volume group $DVG was inactive, and for
	    :   which the local ODM must be updated to match the volume group.  This
	    :   processing is skipped for nodes which have the volume group varyed on
	    :   in passive mode, since LVM does the updates automatically.
	    #
	    if [[ $VG_ACTIVE == S ]] ; then
		#
		:   The volume group $DVG was brought on line in shared mode -
		:   ordinary varyonvg - remove the reserve so that the other
		:   nodes can read the VGDA and VGSA information from the disks.
		#
cel_f8
		(( $TRY_RC != 0 )) && 
		    exit 1
            elif [[ $VG_ACTIVE == I ]] ; then
		# 
		:   The volume group $DVG was originally - before start of this
		:   C-SPOC plan - inactive, so vary it off again.  This will remove
		:   the reserve so that the other nodes can read the VGDA and
		:   VGSA information from the disks.
		# 
cel_f9
                (( $TRY_RC != 0 )) &&
                    exit 1
                #
                :   Get the time stamps in sync
                #
                cl_update_vg_odm_ts -o $DVG "$G_NODES"
                # Clear VG_ACTIVE so the "release" phase below does not try
                # to vary the volume group off a second time (an empty value
                # matches no pattern in the case statement).
                                    VG_ACTIVE=""
	    fi
	    #
	    :	Have each of the inactive nodes run "importvg -L" followed by
	    :	any needed chvg cmmand to update the local ODM, and update the
	    :	local HACMP timestamps for this volume group.
	    #
	    update_cmd="clupdatevg $DVG $_IMPORT_PVID"
	    #---------------------------------------------------
            :   - if remote script debugging is desired - VERBOSE_LOGGING_REMOTE=high
            :   - set up request so the output of the script at the remote node is
            :     saved in /var/hacmp/log/cspoc.log.remote
            #---------------------------------------------------
            if [[ $VERBOSE_LOGGING_REMOTE == "high" ]]
            then
                update_debug_env="VERBOSE_LOGGING=high"
                update_debug="2>&1 | tee >> /var/hacmp/log/cspoc.log.remote"
                update_cmd="$update_debug_env $update_cmd $update_debug"
            fi
	    if [[ -n $chvg_cmd ]] ; then
		update_cmd="$update_cmd && $chvg_cmd"
	    fi
	    e_update_cmd=$(echo $update_cmd | clencodearg -e)
	    NODE_LIST=$(IFS=, set -- $I_NODES ; print "$*")
cel_f10
	    #
	    :	Even if some nodes failed on the "importvg -L", we still need to
	    :	clean up below, so save the error for later
	    #
	    (( $TRY_RC != 0 )) && 
		SAVE_RC=1
	fi
    fi
    if [[ -z $request || $request == "release" ]] &&
       [[ $_REMOVED_VG_ == "false" ]] ; then
	#
	:   Now that all nodes have updated state, put volume group back $DVG into
	:   into the state we found it in - the state was set by _vg_active -
	:   assuming, of course, that it was not entirely removed in the
	:   operation
	#
	TRY_RC=0
	case $VG_ACTIVE in
	I)  #
	    :	The volume group $DVG was originally inactive.  If it was 
	    :	varyed off up above in the synchronization path, nothing needs
	    :	be done here
	    #
	    if [[ $request == release ]]
	    then
		#
		:   The volume group $DVG was originally inactive.  So, vary it off
		#
cel_f11
		(( $TRY_RC != 0 )) && 
		    exit 1
                #
                :   Get the time stamps in sync
                #
                cl_update_vg_odm_ts -o $DVG "$G_NODES"
                    		if [[ -n $P_NODES || -n $C_NODES || -n $S_NODES ]]
		then
		    #
		    :   On a successful varyoff, set the fence height to allow read only
		    :   access if there are any other nodes that are using this volume 
		    :	group.  This should preserve the volume group from inadvertent
		    :   modification by this node.
		    #
cel_f12
		fi
	    fi
	    ;;
	P)  #	
	    :	The volume group $DVG was originally varyed on in passive mode.  So,
	    :	return it to that mode on $CL_NODE
	    #
	    # passive_only_flag is a global left over from _vg_active's
	    # option parsing.
	    if [[ $passive_only_flag != "true" ]]
	    then
cel_f13
		(( $TRY_RC != 0 )) && 
		    exit 1
		#
		:   On a successful varyoff, set the fence height to allow read only
		:   access.  This should preserve the volume group from inadvertent
		:   modification by this node.
		#
cel_f14
	    fi
	    #
	    :   On a successful varyoff, set the fence height to allow read only
	    :   access.  This should preserve the volume group from inadvertent
	    :   modification by this node.
	    #
cel_f15
	    ;;
	S)  #
	    :	The volume group $DVG was originally active.  If we removed the reserves
	    :	up above, do another varyon to put them back on node $CL_NODE
	    #
	    if [[ -n $CL_NODE && -n $I_NODES ]]
	    then
cel_f16
		(( $TRY_RC != 0 )) && 
		    exit 1
	    fi
	    ;;
	C)  #
	    :	The volume group $DVG was in concurrent mode, nothing needs be
	    :	done
	    #
	    ;;
        O)  #
            :   The volume group $DVG was originally unknown and subsequently
            :   imported, nothing actually has to be done here
            #
            ;;
		esac
    fi
    if [[ $_REMOVED_VG_ == "true" && -n $I_NODES ]] ; then
	#
	:   The LVM operation resulted in the complete removal of the
	:   volume group $DVG, export it on the inactive nodes to get rid of their
	:   definitions, too.
	#
	NODE_LIST=$(IFS=, set -- $I_NODES ; print "$*")
cel_f17
	(( $TRY_RC != 0 )) && 
	    SAVE_RC=1
    fi
    return $SAVE_RC		#   Pass back any saved return code
}				#   End _vg_sync
# Include file containing SCSIPR functions
#!/bin/ksh93
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/events/utils/cl_scsipr_event_functions.sh 1.9 
#  
# Licensed Materials - Property of IBM 
#  
# Restricted Materials of IBM 
#  
# COPYRIGHT International Business Machines Corp. 2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
export PATH=$(/usr/es/sbin/cluster/utilities/cl_get_path all)
###########################################################################
#	Function: node_up_scsipr_init
#	called from: node_up
#	Description: This function sets the prkey and policy(PR_shared)
#	of all the VGs which are HACMP resource
#	returns: 0, on success
#	         1, otherwise
#
##########################################################################
function node_up_scsipr_init {
	# Called from node_up.  For every volume group on this node that is an
	# HACMP resource and is SCSIPR Type 7 capable, ensure the PR_shared
	# reserve policy and this node's PR key are set on all of its disks,
	# then register and reserve the disks.  Per-VG failures are logged and
	# the next volume group is processed.
	# Returns: 0 on normal completion (even if individual VGs failed),
	#          1 when processing cannot continue (no PR key, or a VG's
	#            disk list cannot be obtained, or a disk query fails).
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset VGs		# Volume Groups defined on this node
	typeset VG=""
	typeset PR_Key=""	# Persistent Reserve Key value
	typeset -i RC=0		# Return variable
	typeset -i RC1=0	# Return variable
	typeset -i flag=0	# 1 => PR_shared policy and key already in place
	typeset LOCALNODENAME=$(get_local_nodename)
	PR_Key=$(clpr_obtain_prkey $LOCALNODENAME)
	if (( $? != 0 ))
	then
		#
		: Failed generating the PR Key of the node $LOCALNODENAME.
		#
		return 1
	fi
	VGs=$(print $(lsvg -L 2> /var/hacmp/log/${PROGNAME}.LSVG.ERR | egrep -vw 'rootvg|caavg_private'))
	for VG in $VGs
	do
		typeset resgrp=$(clodmget -q "name like '*VOLUME_GROUP' and value = $VG" -f group -n HACMPresource)
		if [[ -n $resgrp ]]
		then
			#
			:   Volume group $VG is an HACMP resource
			#
			clpr_verifycap_vg $VG
			RC=$?
			if (( $RC == 0 ))
			then
				# Check if the policy is already set to PR_shared
				typeset disklist=$(lspv -L | grep -w $VG | awk '{print $1}')
				if [[ -z $disklist ]]
				then
					#
					: Unable to get the list of hdisks.
					#
					return 1
				fi
				typeset hdisk=""
				typeset policy=""
				typeset pr_key=""
				for hdisk in $disklist
				do
					# The assignment is split from the typeset
					# declaration so that the $? test below checks
					# the helper's exit status, not the status of
					# the typeset builtin (which can mask it).
					policy=$(clpr_Get_policy $hdisk)
					if (( $? != 0 ));then
						return 1
					elif [[ $policy == PR_shared ]];then
						pr_key=$(clpr_Get_prkey $hdisk)
						if (( $? != 0 ));then
							return 1
						elif [[ $pr_key == $PR_Key ]];then
							flag=1
							continue
						else
							flag=0
							break
						fi
					else
						flag=0
						break
					fi
				done
				# Set the PR policy to the PR_shared and set the PR Key
				if (( $flag == 0 ));then
					clpr_Set_policy_prkey_vg $VG PR_shared $PR_Key
					if (( $? != 0 ))
					then
						#
						: Failed to set either policy or PR Key on the VG $VG.
						#
						# Continue processing the next Volume Group
						continue
					fi
				fi
				# Register and reserve the disks of Volume Group
				clpr_reg_res_vg $VG $PR_Key
				if (( $? != 0 ))
				then
					#
					: Failed to register and reserve the VG $VG from node $LOCALNODENAME.
					: Unregistering from the VG in case some of the disk got reserved.
					: VG $VG will not come online and corresponding RG will go to error state.
					#
					/usr/bin/errlogger "$PROGNAME : Failed to register and reserve $VG."
					clpr_removeReg_vg $VG 
					# Continue processing the next Volume Group
					continue
				fi
				#
				: Checking if reservation succeeded
				#
				clpr_verifyKey_vg $VG $PR_Key
				RC1=$?
				if (( $RC1 == 1 ))
				then
					#
					: No reservation exists on the VG $VG for node $LOCALNODENAME.
					#
				elif (( $RC1 == 0 ))
				then
					# 
					: Reservation success
					#
				else
					#
					: Failed to verify if the PR_Key of node $LOCALNODENAME got registered.
					#
				fi	
			elif (( $RC == 1 ))
			then
				#
				: VG $VG is not SCSIPR Type 7 capable.
				: $VG will not be varied on and corresponding RG will go to error state.
				: There are one or more disks in the VG $VG which are not SCSIPR capable.
				: Select the disks which are SCSIPR Type 7 capable or disable Disk Fencing
				: in order bring this Volume Group online.
				#
			else
				#
				: Failed to verify SCSIPR capability of the VG $VG.
				: VG $VG will not be varied on and corresponding RG will go to error state.
				#
			fi
		fi
	done
	return 0
}
#######################################################################################
#	Function: node_down_scsipr_term
#	called from: node_down_complete
#	Description: This function clears the reservation on a Volume Group which is a
#	HACMP resource, when last node is going down.
#	returns: 0 on success, gives a warning on failure
#######################################################################################
function node_down_scsipr_term {
	# Called from node_down_complete when the last node is going down.
	# Clear the SCSIPR registration and reservation from every volume
	# group that is configured as an HACMP resource.  Always returns 0;
	# a failure to clear is noted in the trace output only.
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset VG=""
	typeset VGs=""	# List of Volume Groups
	VGs=$(print $(lsvg -L 2> /var/hacmp/log/${PROGNAME}.LSVG.ERR | egrep -vw 'rootvg|caavg_private'))
	for VG in $VGs
	do
		if [[ -n $(clodmget -q "name like '*VOLUME_GROUP' and value = $VG" -f value -n HACMPresource) ]]
		then
			#
			: Clear registration ond reservation from the Volume Group, $VG
			#
			if ! clpr_clear_vg $VG
			then
				#
				: Failed to clear the reservation and registration from the VG $VG.
				: Registration and reservation needed to be cleared in order to
				: use the disks which are part of the VG $VG.
				: Use clmgr clpr_clear hdiskname to clear the reservation from the disks.
				#
			fi
		else
			#
			: VG is not the part of HACMPresource.
			: Hence no SCSIPR action will be taken on VG $VG.
			#
		fi
	done
	return 0
}
######################################################################################
#	Function: cl_confirm_scsipr_res
#	called from: cl_pvo, clvaryonvg, cl_mode3
#	Description: This function checks if the reservation and registration are present
#	on the VG, which is to be varied on.
#	Arguments: Name of the VG
#	returns: 0, on success
#	         1, otherwise 
#
######################################################################################
function cl_confirm_scsipr_res {
	# Called from cl_pvo, clvaryonvg, cl_mode3.  Check that this node's PR
	# key is registered and a reservation is present on the given volume
	# group before it is varied on.
	# Arguments: $1 - volume group name
	# Returns: 0 when the reservation is confirmed, 1 otherwise.
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset VG=$1
	typeset PR_Key=""	# Persistent Reserve Key value
	typeset -i RC=0 	# return variable
	# Fix: this line previously re-declared RC; RC1 is assigned below and
	# was never declared.
	typeset -i RC1=0 	# return variable
	typeset LOCALNODENAME=$(get_local_nodename)
	PR_Key=$(clpr_obtain_prkey $LOCALNODENAME)
	if (( $? != 0 )) 
	then
		#
		: Failed generating the PR Key of the node $LOCALNODENAME.
		#
		return 1
	fi
	# Verifying if the Volume Group $VG is SCSIPR Type 7 capable
	clpr_verifycap_vg $VG
	RC=$?
	if (( $RC == 0 )) 
	then
		# 
		:  Volume Group $VG is SCSIPR Type 7 capable.
		:  Verifying if the PR Key of the $LOCALNODENAME is registered.
		#
		clpr_verifyKey_vg $VG $PR_Key
		RC1=$?
		if (( $RC1 == 0 ))
		then
			#
			: PR Key of $LOCALNODENAME is present on the Volume Group, $VG.
			#
			return 0
		elif (( $RC1 == 1 ))
		then
			#
			: No reservation exists on Volume Group $VG.
			: Volume Group $VG will not be varied on and corresponding RG will go to error state.
			#
		else
			#
			: Failed reading reservation on VG $VG from $LOCALNODENAME.
			: Volume Group $VG will not be varied on and corresponding RG will go to error state.
			#
		fi
	elif (( $RC == 1 ))
	then
		#
		: Volume Group $VG is not SCSIPR Type 7 capable.
		: Volume Group $VG will not be varied on and corresponding RG will go to error state.
		#
	else
		#
		: Failed to verify SCSIPR capability of the VG $VG.
		: Volume Group $VG will not be varied on and corresponding RG will go to error state.
		#
	fi
	return 1
}
#######################################################################################
#	Function: disk_replace_scsipr
#	called from: cldiskreplace
#	Description: This function clears the registration and reservation on the source
#			disk and register and reserve the new disk.
#	returns: 0, on success
#	         1, otherwise 
#
########################################################################################
function disk_replace_scsipr {
	# Called from cldiskreplace.  When cluster services are active and the
	# volume group is SCSIPR Type 7 capable, set the PR policy and key on
	# the replacement disk and register/reserve it; in all cases finish by
	# clearing the registration and reservation from the source disk.
	# Arguments: $1 - source disk, $2 - replacement disk, $3 - volume group
	# Returns: 0 on success, 1 when the replacement disk cannot be
	#          prepared.
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset source_hdisk=$1
	typeset replacement_hdisk=$2
	typeset VG_name=$3
	typeset PR_Key="" 	# Persistent Reserve Key value
	typeset -i RC=0         # return variable
	typeset LOCALNODENAME=$(get_local_nodename)
	#
	:   Find out whether cluster services are active on this node
	#
	if  LC_ALL=C lssrc -ls clstrmgrES 2>&1 | grep "Current state:" | egrep -q -v "ST_INIT|NOT_CONFIGURED"
	then
		#
		:    Checking if $VG_name is SCSIPR Type 7 capable.
		:    If it is so, then clear the reservation from $source_hdisk.
		#
		if clpr_verifycap_vg $VG_name
		then
			#
			:    Registering and Reserving the $replacement_hdisk if
			:    it is SCSIPR Type 7 capable.
			#
			clpr_verifycap $replacement_hdisk
			RC=$?
			case $RC in
			0 )
				# Generate the PR Key for the node.
				PR_Key=$(clpr_obtain_prkey $LOCALNODENAME)
				if (( $? != 0 ))
				then
					#
					: Failed generating the PR Key of the node $LOCALNODENAME.
					: 
					return 1
				fi
				# Set PR Key and policy(PR_shared) on replacement_hdisk.
				if ! clpr_Set_policy_prkey $replacement_hdisk PR_shared $PR_Key
				then
					#
					: Failed setting the Policy or PR Key.
					#
					return 1
				fi
				# Register and reserve replacement_hdisk.
				if ! clpr_reg_res $replacement_hdisk $PR_Key
				then
					#
					: Failed to register and reserve the hdisk $replacement_hdisk for node $LOCALNODENAME.
					#
					/usr/bin/errlogger "$PROGNAME : Failed to register and reserve $replacement_hdisk."
					return 1
				fi
				;;
			1 )
				#
				: Disk $replacement_hdisk is not SCSIPR Type 7 capable.
				: This disk can not be replaced. Disable Disk Fencing in order to use this disk.
				#
				return 1
				;;
			* )
				#
				: Failed to verify SCSIPR capability of the Disk $replacement_hdisk.
				#
				return 1
				;;
			esac
		fi
	fi
	#
	:   Clear the registration and reservation from $source_hdisk.
	#
	if ! clpr_clear $source_hdisk
	then
		#
		: Failed to clear the reservations on disk $source_hdisk.
		: Please use clmgr clpr_clear hdiskname to clear the reservation on the disk before using it again.
		#
	fi
	return 0
}
##################################################################################
#	Function: cl_scsipr_dare_Reg_Res
#	called from: reconfig_resource_acquire, cl_mkvg, cl_mkvg4vp
#	Description: This function register and reserve new VG being created.
#	returns: 0, on success
#	         1, otherwise
#
##################################################################################
function cl_scsipr_dare_Reg_Res {
	# Called from reconfig_resource_acquire, cl_mkvg, cl_mkvg4vp.  Ensure
	# the PR_shared reserve policy and this node's PR key are set on the
	# disks of the given (newly created or acquired) volume group, then
	# register and reserve them and verify the reservation.
	# Arguments: $1 - volume group name
	# Returns: 0 when registration and reservation are verified,
	#          1 otherwise.
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset VG=$1
	typeset PR_Key="" 	# Persistent Reserve Key value
	typeset -i RC=0 	# return variable
	typeset -i RC1=0     	# return variable
	typeset -i flag=0	# If 1 policy and PR Key are already set.
				# if 0 policy and PR Key are to be set.
	typeset LocalNode=$(get_local_nodename)
	PR_Key=$(clpr_obtain_prkey $LocalNode)
	if (( $? != 0 ))
	then
		#
		: Failed generating the PR Key of the node $LocalNode.
		#
		return 1
	fi
	#  Verifying if the Volume Group is SCSIPR Type 7 capable.
	clpr_verifycap_vg $VG
	RC=$?
	if (( $RC == 0 ))
	then
		# Check if the policy is already set to PR_shared
		typeset disklist=$(lspv -L | grep -w $VG | awk '{print $1}')
		if [[ -z $disklist ]]
		then
			#
			: Unable to get the list of hdisks.
			#
			return 1
		fi
		typeset hdisk=""
		typeset policy=""
		typeset pr_key=""
		for hdisk in $disklist
		do
			# The assignment is split from the typeset declaration
			# so that the $? test below checks the helper's exit
			# status, not the status of the typeset builtin.
			policy=$(clpr_Get_policy $hdisk)
			if (( $? != 0 ));then
				return 1
			elif [[ $policy == PR_shared ]];then
				pr_key=$(clpr_Get_prkey $hdisk)
				if (( $? != 0 ));then
					return 1
				elif [[ $pr_key == $PR_Key ]];then
					flag=1
					continue
				else
					flag=0
					break
				fi
			else
				flag=0
				break
			fi
		done
		# Set the PR policy to the PR_shared and set the PR Key
		if (( $flag == 0 ));then
			clpr_Set_policy_prkey_vg $VG PR_shared $PR_Key
			if (( $? != 0 ))
			then
				#
				: Failed to set either policy or PR Key on the VG $VG.
				#
				return 1 
			fi
		fi
		#
		:  Registering and reserving PR Key of node, $LocalNode on Volume Group, $VG.
		#
		clpr_reg_res_vg $VG $PR_Key
		if (( $? != 0 ))
		then
			#
			: Failed to register and reserve the VG $VG for node $LocalNode.
			: Unregistering from the VG in case some of the disk got reserved.
			#
			/usr/bin/errlogger "$PROGNAME : Failed to register and reserve $VG."
			clpr_removeReg_vg $VG 
			return 1
		fi
		#
		: Checking if reservation succeeded
		#
		clpr_verifyKey_vg $VG $PR_Key
		RC1=$?
		if (( $RC1 == 0 ))
		then
			#
			: Registration and reservation succeeded.
			#
			return 0
		elif (( $RC1 == 1 ))
		then
			#
			: No reservation exists on the VG $VG.
			#
		else
			#
			: Failed Verifying the PR Key of node $LocalNode on VG $VG. 
			#
		fi
	elif (( $RC == 1 ))
	then
		#
		: Volume Group $VG is not SCSIPR Type 7 capable.
		#
	else
		#
		: Failed to verify SCSIPR capability of the VG $VG.
		#
	fi
	return 1 
}
#########################################################################
#	Function: clpr_Get_policy
#	Description: Get the current 'reserve_policy'  (via lsattr -El)
#	             input Diskname whose policy needs to be retrieved
#	returns: 0 on success
#	         10 otherwise
##########################################################################
function clpr_Get_policy {
	# Print the current reserve_policy ODM attribute of a disk (via
	# lsattr -El).
	# Arguments: $1 - disk name
	# Returns: 0 with the policy on stdout, 10 when it cannot be read.
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset diskname=$1
	typeset policy=""	# Reserve Policy
	# Query the ODM for the current reserve_policy setting
	policy=$(lsattr -El $diskname -a reserve_policy | awk '/reserve_policy/ {print $2}')
	if [[ -n $policy ]]
	then
		echo "$policy"
		return 0
	fi
	#
	: Unable to get reserve_policy of $diskname from lsattr.
	#
	/usr/bin/errlogger "$PROGNAME : Failed to get ODM reserve_policy. Cannot register/reserve $diskname."
	return 10
}
#########################################################################
#	Function: clpr_Get_prkey
#	Description: Get current PR_key_value  (via lsattr -El)
#	Arguments: Diskname
#	returns: an error code.  (0 - success)
##########################################################################
function clpr_Get_prkey {
	# Print the current PR_key_value ODM attribute of a disk (via
	# lsattr -El).
	# Arguments: $1 - disk name
	# Returns: 0 with the key on stdout, 10 when it cannot be read.
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset diskname=$1
	typeset prkey="" 	# Persistent Reserve Key value
	# Query the ODM for the current PR key value
	prkey=$(lsattr -El $diskname -a PR_key_value |  awk '/PR_key_value/ {print $2}')
	if [[ -n $prkey ]]
	then
		echo "$prkey"
		return 0
	fi
	#
	: Unable to get PR_key_value of $diskname from lsattr.
	#
	/usr/bin/errlogger "$PROGNAME : Failed to get ODM PR_key_value. Cannot register/reserve $diskname."
	return 10
}
#########################################################################
#	Function: clpr_Set_policy_prkey
#	Description: Set reserve_policy and PR_key_value'  (via chdev)
#	Arguments: Diskname, policy  and prkey to be set
#	returns an error code.  (0 - success)
##########################################################################
function clpr_Set_policy_prkey {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x

	typeset diskname=$1	# disk to update
	typeset policy=$2	# desired reserve_policy value
	typeset prkey=$3	# desired PR_key_value
	typeset chk_policy=""	# reserve_policy read back for verification
	typeset chk_prkey=""	# PR_key_value read back for verification

	#
	: Apply the PR key and the reserve policy in a single chdev call, with
	: the key attribute given first.
	#
	if ! chdev -l $diskname -a PR_key_value=$prkey -a reserve_policy=$policy 1>/dev/null
	then
		#
		: Unable to set prkey $prkey and policy $policy for $diskname using chdev.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $diskname."
		return 11
	fi

	#
	: Read back the reserve policy and confirm the requested value stuck.
	#
	chk_policy=$(lsattr -El $diskname -a reserve_policy | awk '/reserve_policy/ {print $2}')
	if [[ -z "$chk_policy" ]]
	then
		#
		: Unable to get reserve_policy for $diskname from lsattr.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $diskname."
		return 10
	fi
	if [[ "$chk_policy" != "$policy" ]]
	then
		#
		: reserve_policy $policy for disk $diskname was not set.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $diskname."
		return 1
	fi

	#
	: Read back the PR key and confirm the requested value stuck.
	#
	chk_prkey=$(lsattr -El $diskname -a PR_key_value | awk '/PR_key_value/ {print $2}')
	if [[ -z "$chk_prkey" ]]
	then
		#
		: Unable to get PR_key_value for $diskname from lsattr.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $diskname."
		return 10
	fi
	if [[ "$chk_prkey" != "$prkey" ]]
	then
		#
		: PR_key_value $prkey for $diskname was not set.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $diskname."
		return 1
	fi
	return 0
}
#########################################################################
#	Function: clpr_Get_policy_vg()
#	Description: Get the current 'reserve_policy'  (via lsattr -El)
#	Arguments: VGname 
#	returns: an error code.  (0 - success)
##########################################################################
function clpr_Get_policy_vg {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x

	typeset vgname=$1	# volume group whose disks are examined
	typeset array=""	# space separated list of hdisks in the VG
	typeset policy=""	# reserve_policy of the disk being examined
	typeset -i count=0	# number of disks in the VG
	typeset -i i=0

	#
	: Collect the physical volumes that belong to volume group $vgname
	#
	array=$(lspv -L | grep -w $vgname | awk '{print $1}')
	if [[ -z "$array" ]]
	then
		#
		: Unable to get the list of hdisks.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to get ODM reserve_policy. Cannot register/reserve $vgname."
		return 1
	fi
	set -A disk_list $array

	#
	: Derive the disk count from the list just collected, instead of
	: running lspv a second time, so that list and count always agree.
	#
	count=${#disk_list[*]}

	# Get current reserve_policy of every disk in the volume group
	while (( i < count ))
	do
		policy=$(/usr/sbin/lsattr -El ${disk_list[i]} -a reserve_policy | awk '/reserve_policy/ {print $2}')
		if [[ -z "$policy" ]]
		then
			#
			: Unable to get reserve_policy of disk ${disk_list[i]}. 
			#
			/usr/bin/errlogger "$PROGNAME : Failed to get ODM reserve_policy. Cannot register/reserve $vgname."
			return 10
		fi
		#
		: Current reserve policy of ${disk_list[i]} is $policy.
		#
		(( i = i + 1 ))
	done
	return 0
}
#########################################################################
#	Function: clpr_Get_prkey_vg
#	Description: Get current PR_key_value  (via lsattr -El)
#	Arguments: VGname
#	returns: an error code.  (0 - success)
##########################################################################
function clpr_Get_prkey_vg {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x

	typeset vgname=$1	# volume group whose disks are examined
	typeset array=""	# space separated list of hdisks in the VG
	typeset pr_key=""	# PR_key_value of the disk being examined
	typeset -i count=0	# number of disks in the VG
	typeset -i i=0

	#
	: Collect the physical volumes that belong to volume group $vgname
	#
	array=$(lspv -L | grep -w $vgname | awk '{print $1}')
	if [[ -z "$array" ]]
	then
		#
		: Unable to get the list of disks for VG $vgname.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to get ODM PR_key_value. Cannot register/reserve $vgname."
		return 1
	fi
	set -A disk_list $array

	#
	: Derive the disk count from the list just collected, instead of
	: running lspv a second time, so that list and count always agree.
	#
	count=${#disk_list[*]}

	# Get current pr_key of every disk in the volume group
	while (( i < count ))
	do
		pr_key=$(lsattr -El ${disk_list[i]} -a PR_key_value | awk '/PR_key_value/ {print $2}')
		if [[ -z "$pr_key" ]]
		then
			#
			: Unable to get PR_key_value of disk ${disk_list[i]} from lsattr.
			#
			/usr/bin/errlogger "$PROGNAME : Failed to get ODM PR_key_value. Cannot register/reserve $vgname."
			return 10
		fi
		#
		: Current pr_key of ${disk_list[i]} is $pr_key.
		#
		(( i = i + 1 ))
	done
	return 0
}
#########################################################################
#	Function: clpr_Set_policy_prkey_vg
#	Description: Set reserve_policy and PR_key_value  (via chdev)
#	Arguments: VGName, policy and prkey
#	returns: an error code.  (0 - success)
##########################################################################
function clpr_Set_policy_prkey_vg { 
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x

	typeset vgname=$1	# volume group to update
	typeset policy=$2	# desired reserve_policy value
	typeset prkey=$3	# desired PR_key_value
	typeset array=""	# space separated list of hdisks in the VG
	typeset -i count=0	# number of disks in the VG
	typeset -i i=0
	typeset verify_prkey=""	 # PR_key_value read back for verification
	typeset verify_policy="" # reserve_policy read back for verification

	#
	: Collect the physical volumes that belong to volume group $vgname
	#
	array=$(lspv -L | grep -w $vgname | awk '{print $1}')
	if [[ -z "$array" ]]
	then
		#
		: Unable to get the list of disks for VG $vgname.
		#
		/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $vgname."
		return 1
	fi
	set -A disk_list $array

	#
	: Derive the disk count from the list just collected, instead of
	: running lspv a second time, so that list and count always agree.
	#
	count=${#disk_list[*]}

	while (( i < count ))
	do
		###########################################################################
		# Set pr_key first and later reserve_policy, a requirement on emc devices #
		###########################################################################
		chdev -l ${disk_list[i]} -a PR_key_value=$prkey -a reserve_policy=$policy 1>/dev/null
		if (( $? != 0 ))
		then
			#
			: Unable to set prkey $prkey and policy $policy for disk ${disk_list[i]} from chdev.
			#
			/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $vgname."
			return 11
		fi
		# Verify if required PR_key_value is set
		verify_prkey=$(lsattr -El ${disk_list[i]} -a PR_key_value |  awk '/PR_key_value/ {print $2}')
		if [[ -z "$verify_prkey" ]]
		then
			#
			: Unable to get pr_key_value for ${disk_list[i]}.
			#
			/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $vgname."
			return 10
		fi
		if [[ "$verify_prkey" != "$prkey" ]]
		then
			#
			: PR_key_value $prkey for disk ${disk_list[i]} was not set.
			#
			/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $vgname."
			return 1
		fi
		# Verify if policy is set correctly
		verify_policy=$(lsattr -El ${disk_list[i]} -a reserve_policy | awk '/reserve_policy/ {print $2}')
		if [[ -z "$verify_policy" ]]
		then
			#
			: Unable to get reserve_policy for disk ${disk_list[i]}.
			#
			/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $vgname."
			return 10
		fi
		if [[ "$verify_policy" != "$policy" ]]
		then
			#
			: Reserve_policy $policy for disk ${disk_list[i]} was not set.
			#
			/usr/bin/errlogger "$PROGNAME : Failed to set ODM reserve_policy and PR_key_value. Cannot register/reserve $vgname."
			return 1
		fi
		(( i = i + 1 ))
	done
	return 0
}
#########################################################################
# 	Function: clpr_obtain_prkey
#	Description: Generate unique PR_key for the node
#	PR_KEY format: "node id" "followed by three Zeros" "cluster id"
#	Arguments: Nodename 
#	returns: an error code.  (0 - success)
##########################################################################
function clpr_obtain_prkey { 
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x

	typeset node=$1		# node for which a PR key is generated
	typeset separator=000	# literal filler between node id and cluster id

	#
	: Get Cluster ID to generate PR_Key
	#
	cid=$(clodmget -f id HACMPcluster)
	if (( $? != 0 ))
	then
		#
		: Failed to get Cluster ID via clodmget from $node.
		#
		return 1
	fi
	if [[ -z $cid ]]
	then
		#
		: Cluster ID is empty from $node.
		#
		return 1
	fi

	#
	: Get Node ID to generate PR_Key
	#
	nid=$(clodmget -n -f node_id -q "name=$node and object=COMMUNICATION_PATH" HACMPnode)
	if (( $? != 0 ))
	then
		#
		: Failed to get Node ID via clodmget from $node.
		#
		return 1
	fi
	if [[ -z $nid ]]
	then
		#
		: Node ID is empty for $node.
		#
		return 1
	fi
	if (( nid == 0 ))
	then
		#
		: Node ID is zero for $node.
		#
		return 1
	fi

	#
	: Put to PR_KEY format: "node id" "followed by three Zeros" "cluster id"
	#
	pr_key_value=$nid$separator$cid
	echo "0x$pr_key_value"
	return 0
}
###################################################################################
#	Function: cl_scsipr_Reg_Res
#	called from: cl_extendvg
#	Description: This function register and reserve the new disk/disks being 
#	added to the Volume Group.
#	returns: 0, on success
#	         1, otherwise
#
###################################################################################
function cl_scsipr_Reg_Res {
	typeset Nodename=$(get_local_nodename)
	typeset -i RC1=0
	typeset disk=$1		# new disk to register and reserve

	#
	: Check whether the disk is SCSIPR Type 7 capable.
	#
	clpr_verifycap $disk
	RC1=$?
	if (( $RC1 == 0 ))
	then
		PR_Key=$(clpr_obtain_prkey $Nodename)
		if (( $? != 0 ))
		then
			#
			: Failed generating the PR Key of the node $Nodename.
			#
			return 1
		fi	
		# Set policy and PR Key on disk being added
		clpr_Set_policy_prkey $disk PR_shared $PR_Key
		if (( $? == 0 ))
		then
			clpr_reg_res $disk $PR_Key
			if (( $? == 0 ))
			then
				#
				: Registration and reservation of $disk succeeded.
				#
				# This function handles a single disk and is not
				# inside a loop, so the former 'continue' here was
				# an error; report success explicitly instead.
				return 0
			else
				#
				: Failed to register and reserve the hdisk $disk for node $Nodename.
				#
				return 1
			fi
		else
			#
			: Failed setting the Policy or PR Key.
			#
			return 1
		fi
	elif (( $RC1 == 1 ))
	then
		#
		: Disk $disk is not SCSIPR Type 7 capable.
		#
		return 1
	else
		#
		: Failed to verify SCSIPR capability of the Disk $disk.
		#
		return 1
	fi
	return 0
}
## Here, we check if the user passed in a resource group or a nodelist
## we set the list of nodes 
if [[ -z $_RG_NODE_LIST ]] ; then
    LISTOFNODES="$_NODE_LIST"
    RG_SUPPLIED="false"
else
    #
    :	Get comma separated list of nodes
    #
    # IFS=, plus "$*" joins the (space separated) RG node list with commas.
    LISTOFNODES=$(IFS=, set -- $_RG_NODE_LIST ; print "$*" )
    RG_SUPPLIED="true"
fi
if [[ -z $LISTOFNODES ]]
then
    # Use default - all nodes in the cluster..
    LISTOFNODES="$_TARGET_NODES"
fi
#
:   Determine where the mkvg command will actually be run.  If the local node
:   is in the node list, use it.  Otherwise, use the first node in the list.
#
if [[ -z $LOCAL_NODE ]]
then
    LOCAL_NODE=$(get_local_nodename)
fi
# ?(...) is a ksh extended pattern: match LOCAL_NODE as a whole element of
# the comma separated node list (optionally preceded/followed by other names).
if [[ $LISTOFNODES == ?(*,)$LOCAL_NODE?(,*) ]]
then
    #
    :   Will do the mkvg on the local node
    #
    FIRST_NODE=$LOCAL_NODE
    # REMAIN_NODES = comma separated LISTOFNODES minus the local node
    REMAIN_NODES=$(IFS=, set -- $(print $LISTOFNODES | tr ',' '\n' | grep -vw $LOCAL_NODE) ; print "$*")
else
    #
    :   Will do the mkvg on the first node in the list
    #
    # ksh runs the last stage of a pipeline in the current shell, so this
    # read sets FIRST_NODE and REMAIN_NODES here (ksh-specific idiom).
    print $LISTOFNODES | IFS=, read FIRST_NODE REMAIN_NODES
fi
if [[ "$_CSPOC_CALLED_FROM_SMIT" == "false" ]] 
then
    ## Check if the VPATHID's are valid, if the program was not called from smit
    if [[ $RG_SUPPLIED == "true" ]]
    then
	cl_lsvpathids -cspoc "-g $_RES_GRP"  > $CL_DATFILE 2>&1
    else
	cl_lsvpathids -cspoc "-n $LISTOFNODES"  > $CL_DATFILE 2>&1
    fi
    # do error check here
    # $? still holds the exit status of whichever cl_lsvpathids call ran above
    if (( $? != 0 ))
    then
	# print error message here 
	nls_msg -l $cspoc_tmp_log ${_MSET} 3 "cl_mkvg4vp: Error getting VPATHID's from nodes"  >& 2
	exit 1
    fi
fi
## We need to modify the command line and substitute  the VPATHID's with 
## the hdisk names of the first node
CMD_ARGS=""
RGFLAG="FALSE"
FDTOFLAG="FALSE"
S_FLAG="FALSE"
P_FLAG=""
v_FLAG=""
VGNAME=""
MAJOR_NUMBER=""
CONCURRENT=""
NOAUTOVARYON=""
XSiteMirror=""
#
:   'c' is included in OPTSTRING so the old SSA concurrent mode flag is
:   recognized by getopts and handled by its case arm below; without it,
:   -c fell into the invalid-option arm and appended "-? c" to CMD_ARGS.
#
OPTSTRING=":cd:BGfs:l:nm:t:V:y:Cr:ESIP:v:"
while getopts $OPTSTRING opt $_CMD_ARGS
do
    case $opt in
	c )
	    #
	    :	Old SSA concurrent mode.  Turn it into enhanced concurrent mode
	    #
	    opt="C"
	    CONCURRENT="-C"
	    NOVARYON="TRUE"
	    ;;
	C )
	    #
	    :	Enhanced concurrent mode
	    #
	    CONCURRENT="-C"
	    NOVARYON="TRUE"
	    ;;
	E )
	    #
	    :   Enable volume group for fast disk takeover
	    #
	    FDTOFLAG="TRUE"
	    opt="C"
	    CONCURRENT="-C"
	    NOVARYON="TRUE"
	    ;;
	l )
	    #
	    :   Check for valid request for cross site LVM mirroring
	    #
	    XSiteMirror=$(print $OPTARG | cldecodearg)
	    if [[ $XSiteMirror == "true" && -z $(odmget HACMPsite) ]]
	    then
		/usr/bin/dspmsg -s 123 cspoc.cat 17 "There are no sites configured.  Sites must be configured before setting \"Enable Cross-Site LVM Mirroring\" to true.\n" >& 2
		exit 1
	    fi
	    # 'continue' skips the CMD_ARGS append below: -l is not an mkvg flag
	    continue
	    ;;
	P )
	    #
	    :   Pick up number of partitions.  Used only if Scalable  selected
	    #
	    P_FLAG="-P $OPTARG"
	    continue
	    ;;
	n )
	    #
	    :   Yes, did specify -n no auto vary on
	    #
	    NOAUTOVARYON="TRUE"
	    ;;
	r )
	    #
	    :   Resource group to create/add the volume group
	    #
	    RGNAME=$(print $OPTARG | cldecodearg)
	    continue
	    ;;
	S )
	    #
	    :   Scalable volume group selected
	    #
	    S_FLAG="TRUE"
	    ;;
	y ) 
	    #
	    :	Pick up volume group name
	    #
	    VGNAME=$(print $OPTARG | cldecodearg)
	    if ! /usr/sbin/getlvname -n "$VGNAME" > /dev/null
	    then
		#
		:   Reject invalid names that could cause problems
		#
		exit 1
	    fi
	    ;;
	V )
	    #
	    :	Pick up major number
	    #
	    MAJOR_NUMBER="-V $(print $OPTARG | cldecodearg)"
	    ;;
	v)
	    #
	    :   Pick up number of logical volumes.  Used only if Scalable  selected
	    #
	    v_FLAG="-v $OPTARG"
	    continue
	    ;;
	* ) 
	    ;;
    esac
    #
    :	Collect arguments for the mkvg command
    #
    CMD_ARGS="$CMD_ARGS -${opt} $OPTARG"
done
if [[ $NOAUTOVARYON != "TRUE" ]]
then
    #
    :   Make real sure the volume group is not automatically varyd on
    #
    CMD_ARGS="-n "$CMD_ARGS
fi
if [[ $S_FLAG == "TRUE" ]]
then
    #
    :   Only if a Scalable volume group was selected are the -P and -v flags passed along
    #
    CMD_ARGS="$CMD_ARGS $P_FLAG $v_FLAG"
fi
#
:   Check to ensure that the PVIDs are valid on all nodes
#
AYNABTU=$(dspmsg -s 43 cspoc.cat 26 "all selected nodes")
for vpathid in $_NON_FLG_ARGS
do
    dvpathid=$(print $vpathid | cldecodearg)
    for node in $(IFS=, set -- $LISTOFNODES ; print $*)
    do
	NDISK=$(grep vpath.*$dvpathid $CL_DATFILE | egrep -w "${node}|${AYNABTU}" | cut -f2 -d: )
	if [[ -z $NDISK ]]
	then
	    #
	    :   the vpathid is not valid 
	    #	
	    nls_msg -l $cspoc_tmp_log ${_MSET} 4 "${_CMD}: Invalid PVID - The VPATHID $dvpathid either does not exist or may  be a part of an existing volume group.\n" ${_CMD} $dvpathid
	    exit 1
	fi
	if [[ $node == ${FIRST_NODE} ]]
	then
	    #
	    : substitute each VPATHID with a hdisk name
	    #
	    E_NDISK=$(print $NDISK | clencodearg)
	    CMD_ARGS="$CMD_ARGS $E_NDISK"
	    E_NDISK_LIST="$E_NDISK_LIST $E_NDISK"
	    [[ -z $HDISKTOIMPORT ]] && HDISKTOIMPORT="$NDISK"
	fi
    done
done
#
:  check if VGNAME is given - if not, pick a good one
#
if [[ -z $VGNAME ]]
then
    #
    :   No volume group name given.  Find a name of the form 'vgxx' that is
    :   unique cluster wide.
    #
    TRY_RC=0
cel_f18
    if (( $TRY_RC != 0 ))
    then
        #
        :   Any failures likely mean that there are some nodes that could not be
        :   checked.  This makes it impossible to guarantee that a cluster-unique
        :   name will be generated.
        #
         nls_msg -l ${cspoc_tmp_log} 49 6 \
            "Unable to reach all cluster nodes \n"
        if [[ -n $_SPOC_FORCE ]]                        # "force" means do it anyway
        then
            nls_msg -l ${cspoc_tmp_log} ${_MSET} 31 \
		"${_CMD}: Volume group created may not have a unique name.\n" ${_CMD} 
        else
            exit 1
        fi
    fi
    #
    :   Look at the collected cluster-wide output in $try_out, and find a name
    :   thats unique
    #
    integer MAX_VGXX_NUMBER
    VGXX_NUMBER=$(grep ' vg[0-9][0-9]*' $try_out | cut -f2 -d'g' | sort -u -n | tail -1)
    if [[ -z $VGXX_NUMBER ]]
    then
        MAX_VGXX_NUMBER=-1
    else
        MAX_VGXX_NUMBER=$VGXX_NUMBER
    fi
    if (( $MAX_VGXX_NUMBER < 99 ))
    then
        typeset -Z2 MAX_VGXX_NUMBER             # use '03' and not '3'
    fi
    MAX_VGXX_NUMBER=$(( MAX_VGXX_NUMBER + 1 ))  # this one should be free
    VGNAME=vg${MAX_VGXX_NUMBER}
    CMD_ARGS="-y $(print $VGNAME | clencodearg) $CMD_ARGS"
fi
#
: truncate the vgname to 15 characters if greater.
#
if (( ${#VGNAME} > 15 ))
then
   VGNAME=$(echo $VGNAME | cut -c1-15)
fi
#
: If the Resource Group already exists and does not match the
: the intended VG type, then this is a failure.
#
if [[ -n $RGNAME ]]
then
    new_rg=TRUE
    startup_pref=$(odmget -q "group = $RGNAME" HACMPgroup | sed -n '/startup_pref = /s/^.*"\([^ ]*\)".*/\1/p')
    if [[ -n $startup_pref ]]
    then
	new_rg=FALSE
	if [[ $startup_pref != "OAAN" ]]
	then
	    if [[ -n $CONCURRENT && $FDTOFLAG == "FALSE" ]]
	    then
	       nls_msg -l $cspoc_tmp_log ${_MSET} 32 "${_CMD}: Error attempting to add a concurrent Volume Group $VGNAME to a shared Resource Group $RGNAME \n" ${_CMD} $VGNAME $RGNAME >& 2
	       exit 1
	    fi
	else
	    if [[ -z $CONCURRENT || $FDTOFLAG == "TRUE" ]] 
	    then
	       nls_msg -l $cspoc_tmp_log ${_MSET} 33 "${_CMD}: Error attempting to add a shared Volume Group $VGNAME to a concurrent Resource Group $RGNAME \n" ${_CMD} $VGNAME $RGNAME >& 2
	       exit 1
	    fi
	fi
    fi
fi
#
: If the Resource Group is defined and it already contains
: maximum volume groups that a resource can have then adding
: another volume group is a failure.
#
if [[ -n $RGNAME ]]
then
    typeset -i vg_in_rg_count=0
    vg_in_rg_count=$(clodmget -n -q "group=$RGNAME and name=VOLUME_GROUP" -f value HACMPresource | wc -l)
    if [[ $vg_in_rg_count -ge $MAX_USER_RESOURCES ]]; then
        nls_msg -2 -l $cspoc_tmp_log ${_MSET} 38 "ERROR: Cannot add more than $MAX_USER_RESOURCES volume groups to a resource group: $RGNAME\n" $MAX_USER_RESOURCES $RGNAME
        exit 1
    fi
fi
#
:   Find out if the volume group exists on any of the cluster nodes
#
EVGNAME=$(print $VGNAME | clencodearg)
TRY_RC=0
[[ -n $_DEBUG ]] && print "DEBUG: Running clresactive on all nodes"
cel_f19
(( $TRY_RC != 0 )) && exit 1
#
: Find the nodes where the volume group exists.
#
G_NODES=$(awk '$2 != "no" {gsub(":","",$1);printf("%s ",$1)}' $try_out)
if [[ -n $G_NODES ]] ; then
    nls_msg -l $cspoc_tmp_log ${_MSET} 12 "${_CMD}: Volume Group Name $VGNAME already exists on nodes ${G_NODES}\n" ${_CMD} ${VGNAME} "${G_NODES}" >& 2
    exit 1
fi
#
: Now, we have enough to create the VG on the first node
#
RETCODE=0
cel_f20
#
: Register and Reserve all the disks of newly created volume group using SCSIPR
: If cluster services are active.
#
if LC_ALL=C lssrc -ls clstrmgrES 2>&1 | grep "Current state:" | egrep -q -v "ST_INIT|NOT_CONFIGURED"
then
	#
	:   Cluster services are active.
	#
	typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge)
	if [[ $SCSIPR_ENABLED == Yes ]]
	then
		#
		:   Register and Reserve disks of volume group as SCSIPR disk fencing in enabled.
		#
		cl_scsipr_dare_Reg_Res $VGNAME
		if (( $? != 0 ))
		then
			exit 1
		fi
	fi
fi
#
:   Set up volume group fencing for this volume group
#
cel_f21
if (( $RETCODE == 0 )) ; then
    if [[ $NOVARYON == "FALSE" ]]; then	
	#
	: Leave the volume group varyd off
	#
cel_f22
    fi
else
    exit ${RETCODE}
fi
if [[ -z "$_SPOC_FORCE" ]] ; then
    FORCE_IMPORT=""
else
    FORCE_IMPORT="-f"
fi
#
: import the vg on all other nodes 
#
WHERE=""
if [[ $RG_SUPPLIED == "true" ]]
then
    WHERE="-g $_RES_GRP"
elif [[ -n $REMAIN_NODES ]]
then
    WHERE="-n $REMAIN_NODES"
fi
#
: If the Resource Group is defined and it already contains
: maximum volume groups that a resource can have then adding
: another volume group is a failure.
#
if [[ -n $_RES_GRP ]]
then
    typeset -i vg_in_rg_count=0
    vg_in_rg_count=$(clodmget -n -q "group=$_RES_GRP and name=VOLUME_GROUP" -f value HACMPresource | wc -l)
    if [[ $vg_in_rg_count -ge $MAX_USER_RESOURCES ]]; then
        nls_msg -2 -l $cspoc_tmp_log ${_MSET} 38 "ERROR: Cannot add more than $MAX_USER_RESOURCES volume groups to a resource group: $_RES_GRP\n" $MAX_USER_RESOURCES $_RES_GRP
        exit 1
    fi
fi
if [[ -n $CONCURRENT ]]
then
    CONCURRENT="-c"
fi
if [[ -n $WHERE ]]
then
    cl_importvg -cspoc "$FORCE_IMPORT $WHERE" -R $FIRST_NODE $MAJOR_NUMBER $CONCURRENT -y $VGNAME -Q $HDISKTOIMPORT
    RETCODE=$?
    (( $RETCODE != 0 )) && exit ${RETCODE}
else
    #
    :   Warn the user that the volume group is known on only $FIRST_NODE
    #
    MENU=$(dspmsg -s 104 cspoc.cat 356 "Import a Volume Group")
    dspmsg -s 43 cspoc.cat 42 "${_CMD}: Volume group $VGNAME is defined on only node ${FIRST_NODE}.  \n\
	 Use the \"$MENU\" function to make this volume group known on other cluster nodes\n" ${_CMD} ${VGNAME} $FIRST_NODE ${MENU}
fi
#
:   Do we have to create a resource group named \$RGNAME
#
if [[ -n $RGNAME ]]
then
    #
    :   Determine the type of the resource group
    #
    if [[ -n $CONCURRENT && $FDTOFLAG == "FALSE" ]] ; then
        VGTYPE="CONCURRENT_VOLUME_GROUP"
        FILESYSTEM=""
    else
        VGTYPE="VOLUME_GROUP"
        FILESYSTEM="FILESYSTEM=ALL"
    fi
    if [[ $new_rg == "TRUE" ]]
    then
	#
	:   Create a resource group named $RGNAME containing the nodes
        :   $_TARGET_NODES
        #
        Class="HACMPgroup:"
        group="group=${RGNAME}"
        type="stype=ignore"
        nodes="nodes=$(IFS=, set -- $_TARGET_NODES ; print $*)"
        if [[ -n $CONCURRENT && $FDTOFLAG == "FALSE" ]] ; then
            startup_pref="startup_pref=OAAN"
            fallover_pref="fallover_pref=BO"
        else
            startup_pref="startup_pref=OHN"
            fallover_pref="fallover_pref=FNPN"
        fi
        fallback_pref="fallback_pref=NFB"
        #
        :       Add the new resource group definition
        #
        if ! printf "%s\n%s\n%s\n%s\n%s\n%s\n%s\n" $Class $group $type "$nodes" $startup_pref $fallover_pref $fallback_pref | odmadd
	then
	    # Re-use amsessage from a different set
	    nls_msg -l $cspoc_tmp_log 126 54 \
	    "${_CMD}: odmadd failed - could not create resource group $RGNAME\n" ${_CMD} $RGNAME >& 2
	    exit 1
	fi
        claddres -g $RGNAME ${VGTYPE}=${VGNAME} ${FILESYSTEM} FORCED_VARYON=false FSCHECK_TOOL=fsck FS_BEFORE_IPADDR=false RECOVERY_METHOD=sequential SSA_DISK_FENCING=false VG_AUTO_IMPORT=false
        RETCODE=$?
        if (( $RETCODE != 0 ))
        then
            nls_msg -l $cspoc_tmp_log ${_MSET} 20 "${_CMD}: claddres -g $RGNAME failed\n"  ${_CMD} $RGNAME >& 2
            exit 1
        fi
	#
	:   Capture new object name for completion message
	#
	NEW_OBJ_NAME=$RGNAME
	NEW_OBJ_TYPE="Resource Group"
    else
	#
	:   Add this resource to an existing resource group
	#
	Class="HACMPresource:"
	group="group=${RGNAME}"
	name="name=${VGTYPE}"
	value="value=${VGNAME}"
	id="id=0"
	if ! printf "%s\n%s\n%s\n%s\n%s\n" $Class $group $name $value $id | odmadd
	then
	    nls_msg -l $cspoc_tmp_log ${_MSET} 35 "${_CMD}: Add of volume group $VGNAME to resource group $RGNAME failed\n" ${_CMD} $VGNAME $RGNAME >& 2
	    exit 1
	fi
	#
	:   Capture new object name for completion message
	#
	NEW_OBJ_NAME=$VGNAME
	NEW_OBJ_TYPE="Volume Group"
	#
	:   Having updated the resource group, force a requirement for synchronization
	#
	Class="HACMPcluster:"
	handle="handle=0"
	printf "%s\n%s\n" $Class $handle | odmchange -o HACMPcluster
    fi
    #
    :	Warn the user that the configuration must be synchronized
    #
    nls_msg -l $cspoc_tmp_log ${_MSET} 36 "${_CMD}: The HACMP configuration has been changed - $NEW_OBJ_TYPE $NEW_OBJ_NAME has been added.  The configuration must be synchronized to make this change effective across the cluster\n" $_CMD $NEW_OBJ_TYPE $NEW_OBJ_NAME >& 2
fi
#
: Enable/Disable VG for Cross-Site LVM Mirroring
#
setvg_mode
RETCODE=$?
if (( $RETCODE != 0 ))
then
    nls_msg -l $cspoc_tmp_log ${_MSET} 37 "${_CMD}: Cross site mirroring set up failed for volume group $VNAME in $RGNAME\n" ${_CMD} $VGNAME $RGNAME
fi
#
:   Make sure PVIDs are on VPATHs, not hdisks
#
cl_dpovgfix -cspoc "-n $LISTOFNODES" -cspoc "-q" $VGNAME
RETCODE=$?
if (( $RETCODE != 0 ))
then
    exit $RETCODE
fi
#
:   Update the harvested configuration information with the new volume group, to populate pick lists
#
nls_msg -l $cspoc_tmp_log ${_MSET} 16 "${_CMD}: Discovering Volume Group Configuration...\n" ${_CMD} >& 2
/usr/es/sbin/cluster/utilities/clharvest_vg -w 1>/dev/null
RETCODE=$?
if (( $RETCODE != 0 ))
then
    nls_msg -l $cspoc_tmp_log ${_MSET} 17 "${_CMD}: Volume Group Discovery failed\n" ${_CMD} >& 2
fi
exit ${RETCODE}

