#!/bin/ksh
# Generated C-SPOC execution plan (see the cl_crlvfs.cel prolog further down).

export try_out try_err cspoc_tmp_log
# FPATH lets ksh autoload the C-SPOC helper functions (cdsh, get_rc, nls_msg,
# log_cmd, log_output, cexit, ...) from the cspoc directory.
export FPATH=/usr/es/sbin/cluster/cspoc

# Per-invocation temporary log; $$ makes the name unique to this process.
cspoc_tmp_log=/var/hacmp/log/cel$$_tmplog
# NOTE(review): $* is unquoted; C-SPOC arguments are clencodearg-encoded and
# presumably contain no whitespace -- confirm before changing to "$@".
log_cmd $cspoc_tmp_log $0 $*

# On any exit, run C-SPOC exit processing with the final status.
trap 'cexit $cspoc_tmp_log $?' EXIT
function cel_f1
{
    # Run $LSLPP_CMD on all nodes in $NODE_LIST in one cdsh call (-q1) and
    # collect each node's return code.  The empty case statement means per-node
    # errors are deliberately ignored; the last node's code is returned.
    cel_s1=/tmp/cel$$_s1
    try_err=${cel_s1}.err
    try_out=${cel_s1}.out
    # Double quotes: the command text is expanded now; the EXIT trap appends
    # this step's captured output to the C-SPOC log.
    trap "log_output $cspoc_tmp_log ${cel_s1}               eval $LSLPP_CMD" EXIT
    cdsh $cel_s1 $NODE_LIST -q1               eval $LSLPP_CMD
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $NODE_LIST; do
	cel_rc=$(get_rc ${cel_s1} $node)
	case $cel_rc in
	esac
    done
    # Strip the comma prepended above, restoring the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f2
{
    # Run the encoded lspv command ($E_LSPV_CMD) on the reference node(s) in
    # $_REFNODE; on failure, record the code in TRY_RC and log message 24.
    cel_s2=/tmp/cel$$_s2
    try_err=${cel_s2}.err
    try_out=${cel_s2}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s2} 	    eval $E_LSPV_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $_REFNODE; do
	cdsh $cel_s2 $node -q 	    eval $E_LSPV_CMD
	cel_rc=$(get_rc ${cel_s2} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		TRY_RC=$cel_rc
		    		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 24 "${_CMD}: Error executing lspv on node $node.\n" ${_CMD} $node 
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f3
{
    # Run the encoded lspv command on each node in $_NODE; failures are
    # summed into TRY_RC (unlike cel_f2, which keeps only the last code).
    cel_s3=/tmp/cel$$_s3
    try_err=${cel_s3}.err
    try_out=${cel_s3}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s3} 	eval $E_LSPV_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $_NODE; do
	cdsh $cel_s3 $node -q 	eval $E_LSPV_CMD
	cel_rc=$(get_rc ${cel_s3} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	    TRY_RC=$((TRY_RC+cel_rc))
		    	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 24 "${_CMD}: Error executing lspv on node $node.\n" ${_CMD} $node 
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f4
{
    # Query active cluster nodes (clgetactivenodes) on each node in
    # $_ACTIVE_NODE.  The empty case means errors are deliberately ignored;
    # the last node's return code is passed back to the caller.
    cel_s4=/tmp/cel$$_s4
    try_err=${cel_s4}.err
    try_out=${cel_s4}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s4} 	/usr/es/sbin/cluster/utilities/clgetactivenodes -n $E_ACTIVE_NODE" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $_ACTIVE_NODE; do
	cdsh $cel_s4 $node -q 	/usr/es/sbin/cluster/utilities/clgetactivenodes -n $E_ACTIVE_NODE
	cel_rc=$(get_rc ${cel_s4} $node)
	case $cel_rc in
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f5
{
    # Locate the owning volume group: run "clgetvg $option $parameter" on each
    # node in $_TARGET_NODES and stop at the first node that reports it.
    # On success TRY_RC is cleared; on failure we try to distinguish
    # "node unreachable" from "command failed" via the captured stderr.
    cel_s5=/tmp/cel$$_s5
    try_err=${cel_s5}.err
    try_out=${cel_s5}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s5} 	    clgetvg $option $parameter" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cdsh $cel_s5 $node -q 	    clgetvg $option $parameter
	cel_rc=$(get_rc ${cel_s5} $node)
	case $cel_rc in
	    0)
		#
		:   Stop on the first node that reports the owning volume group.
		:   Note that this is just the first one that knows about the
		:   volume group - it appears in the local ODM.  Actual volume
		:   group state is determined below.
		#
		TRY_RC=0
		# Fix: restore IFS exactly once.  The generated code stripped
		# the leading comma twice, which would eat a comma that was
		# already part of the caller's IFS.
		IFS=${IFS#,}
		return
		;;
	    *)
		if [ $cel_rc != 0 ]; then
		    #
		    :   The C-SPOC communications mechanism does not provide a
		    :   convenient indication of the difference between being
		    :   unable to reach a remote node, and a failure of a command
		    :   run on that remote node.  Attempt to distinguish that here
		    :   by looking for an error message from that node.
		    #
		    if [[ -f $try_err ]] &&
			! grep -q "^${node}: " $try_err # ignore any 'not found' msg
		    then
			nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 1 "${_CMD}: Can\'t reach $node, continuing anyway\n" ${_CMD} $node 
		    fi
		    TRY_RC=$cel_rc
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f6
{
    # Run "clresactive -v $VG" across all $_TARGET_NODES in one cdsh call.
    # A node that fails is reported as unreachable and then deliberately
    # treated as success (cel_rc reset to 0), so this step never blocks the
    # plan; the TRY_RC addition after the reset is a no-op kept as generated.
    cel_s6=/tmp/cel$$_s6
    try_err=${cel_s6}.err
    try_out=${cel_s6}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s6} 	clresactive -v $VG" EXIT
    cdsh $cel_s6 $_TARGET_NODES -q 	clresactive -v $VG
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cel_rc=$(get_rc ${cel_s6} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	   nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 1 "${_CMD}: can't reach $node, continuing anyway\n"  ${_CMD} $node  
		    	   cel_rc=0
		    	   TRY_RC=$((TRY_RC+cel_rc))
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f7
{
    # Vary on $VG via clvaryonvg on $CL_NODE.  On failure, record the code in
    # TRY_RC, log message 24/7, and echo any captured output to stderr so the
    # user sees the failure detail.
    cel_s7=/tmp/cel$$_s7
    try_err=${cel_s7}.err
    try_out=${cel_s7}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s7} 	    clvaryonvg $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s7 $node -q1 	    clvaryonvg $VG
	cel_rc=$(get_rc ${cel_s7} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		                    TRY_RC=$cel_rc
		                    nls_msg -2 -l $cspoc_tmp_log 24 7 "error executing clvaryonvg $DVG on node $node\n" $DVG $node 
		                    if [[ -s $try_out || -s $try_err ]]
		                    then
		                        #
		                        :   If stdout or stderr was captured for this failure, show
		                        :   the information to the user
		                        #
		                        nls_msg -2 -l $cspoc_tmp_log 6 7 "Error detail:"
		                        cat -q $try_out $try_err >&2
		                    fi
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f8
{
    # Run "cl_pvo -v $VG" on $CL_NODE; log any error (with captured output)
    # but continue.  NOTE(review): the error message text reuses the
    # clvaryonvg message (24/7) even though the command here is cl_pvo --
    # inherited from the generator, confirm before changing.
    cel_s8=/tmp/cel$$_s8
    try_err=${cel_s8}.err
    try_out=${cel_s8}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s8} 	    cl_pvo -v $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s8 $node -q1 	    cl_pvo -v $VG
	cel_rc=$(get_rc ${cel_s8} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		#
		    		:   Log any error, but continue.
		    		#
		                    TRY_RC=$cel_rc
		                    nls_msg -2 -l $cspoc_tmp_log 24 7 "error executing clvaryonvg $DVG on node $node\n" $DVG $node 
		                    if [[ -s $try_out || -s $try_err ]]
		                    then
		                        #
		                        :   If stdout or stderr was captured for this failure, show
		                        :   the information to the user
		                        #
		                        nls_msg -2 -l $cspoc_tmp_log 6 7 "Error detail:"
		                        cat -q $try_out $try_err >&2
		                    fi
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f9
{
    # Unlock the volume group: "varyonvg -n -b -u $VG" on $CL_NODE (no cdsh
    # -q flag, so output is kept).  Failures are summed into TRY_RC.
    cel_s9=/tmp/cel$$_s9
    try_err=${cel_s9}.err
    try_out=${cel_s9}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s9} 		    varyonvg -n -b -u $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s9 $node 		    varyonvg -n -b -u $VG
	cel_rc=$(get_rc ${cel_s9} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log 49 26 "Error unlocking volume group %s\n" "$DVG"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f10
{
    # Vary off $VG on each node in $CL_NODE; failures are summed into TRY_RC
    # and reported.  The last node's return code is passed to the caller.
    cel_s10=/tmp/cel$$_s10
    try_err=${cel_s10}.err
    try_out=${cel_s10}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s10}                     varyoffvg $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s10 $node                     varyoffvg $VG
	cel_rc=$(get_rc ${cel_s10} $node)
	if [ $cel_rc != 0 ]; then
	    TRY_RC=$((TRY_RC+cel_rc))
	    # Fix: the message format contains %s but no argument was passed;
	    # supply the display VG name, as the matching call in cel_f9 does.
	    # NOTE(review): message 49/26 says "unlocking" although the command
	    # here is varyoffvg -- message id inherited from the generator.
	    nls_msg -2 -l $cspoc_tmp_log 49 26 "Error unlocking volume group %s\n" "$DVG"
	fi
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f11
{
    # Run the encoded VG-update command ($e_update_cmd, clupdatevg) on each
    # node in $NODE_LIST; failures are summed into TRY_RC and reported.
    cel_s11=/tmp/cel$$_s11
    try_err=${cel_s11}.err
    try_out=${cel_s11}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s11} 		eval $e_update_cmd" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $NODE_LIST; do
	cdsh $cel_s11 $node -q 		eval $e_update_cmd
	cel_rc=$(get_rc ${cel_s11} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		    TRY_RC=$((TRY_RC+cel_rc))
		    		    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 16 "${_CMD}: Error executing clupdatevg $DVG $_IMPORT_PVID on node $node\n" ${_CMD} $DVG $_IMPORT_PVID $node 
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f12
{
    # Vary off $VG on $CL_NODE; failures are summed into TRY_RC and reported
    # with the varyoffvg-specific message (LVM message set, id 7).
    cel_s12=/tmp/cel$$_s12
    try_err=${cel_s12}.err
    try_out=${cel_s12}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s12} 		    varyoffvg $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s12 $node 		    varyoffvg $VG
	cel_rc=$(get_rc ${cel_s12} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 7 "${_CMD}: Error executing varyoffvg $DVG on node $node\n" ${_CMD} $DVG $node 
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f13
{
    # Set the VG fence height to read-only on $CL_NODE.  The 'ro' argument is
    # clencodearg-encoded for transport.  Errors are logged but do not set
    # TRY_RC -- this step is best-effort by design.
    cel_s13=/tmp/cel$$_s13
    try_err=${cel_s13}.err
    try_out=${cel_s13}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s13} 			cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s13 $node -q1 			cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s13} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			    #
		    			    :   Log any error, but continue.
		    			    #
		    			    nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f14
{
    # Vary on $VG in concurrent/passive mode ("varyonvg -c -P") on $CL_NODE;
    # failures are summed into TRY_RC and reported (message 56).
    cel_s14=/tmp/cel$$_s14
    try_err=${cel_s14}.err
    try_out=${cel_s14}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s14} 		    varyonvg -c -P $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s14 $node 		    varyonvg -c -P $VG
	cel_rc=$(get_rc ${cel_s14} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 56 "${_CMD}: Error executing varyonvg -c -P $DVG on node $node\n" $_CMD $DVG 
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f15
{
    # Set the VG fence height to read-only on $CL_NODE (same operation as
    # cel_f13, emitted again by the generator for a different plan step).
    # Errors are logged but deliberately do not set TRY_RC.
    cel_s15=/tmp/cel$$_s15
    try_err=${cel_s15}.err
    try_out=${cel_s15}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s15} 		    cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s15 $node -q1 		    cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s15} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			#
		    			:   Log any error, but continue.
		    			#
		    			nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f16
{
    # Set the VG fence height to read-only on $CL_NODE (third emission of the
    # fence-height step).  A failure here is logged only: if it reflects a
    # real problem, the subsequent varyonvg step will fail and be reported.
    cel_s16=/tmp/cel$$_s16
    try_err=${cel_s16}.err
    try_out=${cel_s16}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s16} 		cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s16 $node -q1 		cl_set_vg_fence_height -c $VG $(print 'ro' | clencodearg)
	cel_rc=$(get_rc ${cel_s16} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		    #
		    		    :   Log any error, but continue.  If this is a real problem, the varyonvg will fail
		    		    #
		    		    nls_msg -2 -l $cspoc_tmp_log 43 50 "$PROGNAME: Volume group $DVG fence height could not be set to read/only" $PROGNAME $DVG "$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f17
{
    # Re-lock the volume group ("varyonvg -n $VG") on $CL_NODE, undoing the
    # unlock done by cel_f9; failures are summed into TRY_RC (message 49/29).
    cel_s17=/tmp/cel$$_s17
    try_err=${cel_s17}.err
    try_out=${cel_s17}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s17} 		    varyonvg -n $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s17 $node 		    varyonvg -n $VG
	cel_rc=$(get_rc ${cel_s17} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=$((TRY_RC+cel_rc))
		    			nls_msg -2 -l $cspoc_tmp_log 49 29 "Error re-locking volume group %s\n" "$DVG"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f18
{
    # Export $VG from each node in $NODE_LIST.  Failures are reported
    # (message 37/3) but deliberately do not set TRY_RC; the last node's
    # return code is passed back to the caller.
    cel_s18=/tmp/cel$$_s18
    try_err=${cel_s18}.err
    try_out=${cel_s18}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s18} 	    exportvg $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $NODE_LIST; do
	cdsh $cel_s18 $node -q 	    exportvg $VG
	cel_rc=$(get_rc ${cel_s18} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		nls_msg -2 -l $cspoc_tmp_log 37 3 "${_CMD}: Could not export volume group $DVG\n" ${_CMD} ${DVG} 
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f19
{
    # Run the encoded query command ($query_cmd) on $CL_NODE silently.
    # NOTE(review): a failure sets lowercase 'try_rc', unlike the uppercase
    # TRY_RC used by the sibling steps -- presumably a distinct flag checked
    # by the generated caller; confirm before "fixing" the case.
    cel_s19=/tmp/cel$$_s19
    try_err=${cel_s19}.err
    try_out=${cel_s19}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s19}     eval $query_cmd" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s19 $node -q1     eval $query_cmd
	cel_rc=$(get_rc ${cel_s19} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		            try_rc=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f20
{
    # Run the encoded mirror-pool query ($MPQ_CMD) on $CL_NODE; any failure
    # sets TRY_RC=1 (no message -- the caller reports the aggregate result).
    cel_s20=/tmp/cel$$_s20
    try_err=${cel_s20}.err
    try_out=${cel_s20}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s20}         eval $MPQ_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s20 $node -q1         eval $MPQ_CMD
	cel_rc=$(get_rc ${cel_s20} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		                TRY_RC=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f21
{
    # Run the encoded lsmp command ($LSMP_CMD) on $CL_NODE; any failure sets
    # TRY_RC=1 (no message -- the caller reports the aggregate result).
    cel_s21=/tmp/cel$$_s21
    try_err=${cel_s21}.err
    try_out=${cel_s21}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s21} 	    eval $LSMP_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s21 $node -q1 	    eval $LSMP_CMD
	cel_rc=$(get_rc ${cel_s21} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		TRY_RC=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f22
{
    # Run $E_GET_LV_COMMAND once across all $_CLUSTER_NODES, then check each
    # node's return code; any failure sets TRY_RC=1 and is reported.
    cel_s22=/tmp/cel$$_s22
    try_err=${cel_s22}.err
    try_out=${cel_s22}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s22}     $E_GET_LV_COMMAND" EXIT
    cdsh $cel_s22 $_CLUSTER_NODES -q     $E_GET_LV_COMMAND
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $_CLUSTER_NODES; do
	cel_rc=$(get_rc ${cel_s22} $node)
	if [ $cel_rc != 0 ]; then
	    TRY_RC=1
	    # Fix: the message format contains %s but no node argument was
	    # passed; supply the node on which the failure was detected.
	    nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 16 \
		"Unable to obtain logical volume names from cluster node %s\n" $node
	fi
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f23
{
    # Run the encoded mount-point-in-use check ($CHK_CMD) on each target node;
    # a non-zero return means the mount point is already in use there, so set
    # TRY_RC=1 and report message 45.
    cel_s23=/tmp/cel$$_s23
    try_err=${cel_s23}.err
    try_out=${cel_s23}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s23}     eval $CHK_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $_TARGET_NODES; do
	cdsh $cel_s23 $node -q     eval $CHK_CMD
	cel_rc=$(get_rc ${cel_s23} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	TRY_RC=1
		    	nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 45 \
		    	    "${_CMD}: Mount point $DMP already is in use on node $node" ${_CMD} $DMP $node
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f24
{
    # Run the encoded lsvg command ($LSVG_CMD) on $CL_NODE; any failure sets
    # TRY_RC=1 and reports message 21.  Note this nls_msg call has no -2
    # (stderr) flag, unlike the sibling steps -- as generated.
    cel_s24=/tmp/cel$$_s24
    try_err=${cel_s24}.err
    try_out=${cel_s24}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s24}     eval $LSVG_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s24 $node -q     eval $LSVG_CMD
	cel_rc=$(get_rc ${cel_s24} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	TRY_RC=1
		    	nls_msg -l ${cspoc_tmp_log} ${_MSET} 21 \
		    	    "Error getting volume group information\n"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f25
{
    # List the logical volumes in $VG ("lsvg -l") on $CL_NODE; any failure
    # sets TRY_RC=1 silently (output is consumed by the caller).
    cel_s25=/tmp/cel$$_s25
    try_err=${cel_s25}.err
    try_out=${cel_s25}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s25} 	lsvg -l $VG" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s25 $node -q 	lsvg -l $VG
	cel_rc=$(get_rc ${cel_s25} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	    TRY_RC=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f26
{
    # Create the jfs/jfs2 log logical volume ($E_LOG_LV, encoded) in $VG on
    # $CL_NODE; any failure sets TRY_RC=1 and reports message 22 with the
    # cluster-unique (decoded) log LV name.
    cel_s26=/tmp/cel$$_s26
    try_err=${cel_s26}.err
    try_out=${cel_s26}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s26} 	    mklv -t $E_JFSLOG -y $E_LOG_LV $MP_COPY $VG $E_LOG_PARTITIONS" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s26 $node 	    mklv -t $E_JFSLOG -y $E_LOG_LV $MP_COPY $VG $E_LOG_PARTITIONS
	cel_rc=$(get_rc ${cel_s26} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		TRY_RC=1
		    		nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 22 \
		    		    "Error creating log logical volume %s\n" "$CLSTR_UNIQ_LOG_LV"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f27
{
    # Format the newly created log logical volume by running the encoded
    # $E_FORMAT_COMMAND on $CL_NODE; any failure sets TRY_RC=1 (message 23).
    cel_s27=/tmp/cel$$_s27
    try_err=${cel_s27}.err
    try_out=${cel_s27}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s27} 	    eval $E_FORMAT_COMMAND" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s27 $node 	    eval $E_FORMAT_COMMAND
	cel_rc=$(get_rc ${cel_s27} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		TRY_RC=1
		    		nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 23 \
		    		    "Error formatting log logical volume %s\n" "$CLSTR_UNIQ_LOG_LV"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f28
{
    # Create the filesystem's logical volume ($E_FS_LV, encoded) in $VG on
    # $CL_NODE; any failure sets TRY_RC=1 and reports message 24 with the
    # cluster-unique (decoded) LV name.
    cel_s28=/tmp/cel$$_s28
    try_err=${cel_s28}.err
    try_out=${cel_s28}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s28}     mklv -t $E_JFS -y $E_FS_LV $MP_COPY $VG $E_NUM_PARTITIONS" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s28 $node     mklv -t $E_JFS -y $E_FS_LV $MP_COPY $VG $E_NUM_PARTITIONS
	cel_rc=$(get_rc ${cel_s28} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 24 \
		    	    "Error creating logical volume %s\n" "$CLSTR_UNIQ_LV_NAME"
		    	TRY_RC=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f29
{
    # Create the filesystem on the new LV ("crfs -d $E_FS_LV ...") on
    # $CL_NODE.  A failure sets TRY_RC=1 so the caller can clean up the LVs
    # it created in the earlier steps.
    cel_s29=/tmp/cel$$_s29
    try_err=${cel_s29}.err
    try_out=${cel_s29}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s29}     crfs -d $E_FS_LV $CRFS_PASS_OPTS" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s29 $node     crfs -d $E_FS_LV $CRFS_PASS_OPTS
	cel_rc=$(get_rc ${cel_s29} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    	#
		    	:   If it returned an error on CL_NODE, note that for later clean up
		    	#
		            TRY_RC=1
		            nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 25 \
		               "Error creating filesystem \n"
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f30
{
    # Mount the new filesystem ($_MOUNT_POINT) on $CL_NODE; any failure sets
    # TRY_RC=1 silently (the caller reports the aggregate result).
    cel_s30=/tmp/cel$$_s30
    try_err=${cel_s30}.err
    try_out=${cel_s30}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s30} 		    mount $_MOUNT_POINT" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s30 $node 		    mount $_MOUNT_POINT
	cel_rc=$(get_rc ${cel_s30} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    			TRY_RC=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f31
{
    # Run the encoded lslpp query ($LSLPP_CMD) on $CL_NODE; any failure sets
    # TRY_RC=1 silently.
    cel_s31=/tmp/cel$$_s31
    try_err=${cel_s31}.err
    try_out=${cel_s31}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s31}                         eval $LSLPP_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s31 $node -q1                         eval $LSLPP_CMD
	cel_rc=$(get_rc ${cel_s31} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		                                TRY_RC=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f32
{
    # Run the encoded mount-guard command ($MOUNTGUARD) on $CL_NODE.  The
    # empty case means errors are deliberately ignored; the last node's
    # return code is passed back to the caller.
    cel_s32=/tmp/cel$$_s32
    try_err=${cel_s32}.err
    try_out=${cel_s32}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s32}                             eval $MOUNTGUARD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $CL_NODE; do
	cdsh $cel_s32 $node                             eval $MOUNTGUARD
	cel_rc=$(get_rc ${cel_s32} $node)
	case $cel_rc in
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
function cel_f33
{
    # Propagate the filesystem definition update ($FS_UPDATE_CMD, encoded) to
    # each participating node in $P_NODES; any failure sets TRY_RC=1 silently.
    cel_s33=/tmp/cel$$_s33
    try_err=${cel_s33}.err
    try_out=${cel_s33}.out
    # Expanded now (double quotes); EXIT trap logs this step's output.
    trap "log_output $cspoc_tmp_log ${cel_s33} 	    eval $FS_UPDATE_CMD" EXIT
    # Prepend ',' to IFS so the comma-separated node list word-splits.
    IFS=,$IFS
    for node in $P_NODES; do
	cdsh $cel_s33 $node 	    eval $FS_UPDATE_CMD
	cel_rc=$(get_rc ${cel_s33} $node)
	case $cel_rc in
	    *)
		if [ $cel_rc != 0 ]; then
		    		TRY_RC=1
		fi
		;;
	esac
    done
    # Restore the caller's IFS.
    IFS=${IFS#,}
    return $cel_rc
}
#  
#  ALTRAN_PROLOG_BEGIN_TAG                                                     
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2021.  All rights reserved.         	
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                       
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
# 61haes_r714 src/43haes/usr/sbin/cluster/cspoc/plans/cl_crlvfs.cel 1.43.1.1
#  
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 2000,2013 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  d82b234 43haes/usr/sbin/cluster/cspoc/plans/cl_crlvfs.cel, 726, 2147A_aha726, Apr 29 2021 12:22 AM
# DESCRIPTION:
#    This C-SPOC plan creates a jfs or jfs2 filesystem and underlying logical
#    volume within an existing volume group.  If no log logical volume exists
#    this plan will create and format a new log logical volume.
#
# PARAMETERS:
#   The cl_crlvfs command arguments include most options and arguments
#   that are valid for the AIX crfs command as well as C-SPOC specific
#   arguments.   The C-SPOC specific arguments are as follows:
#     	-d 1..9			Debug level
# 	-f			C-SPOC force flag
#                                 This flag forces execution of this command
#                                 even when one or more nodes is inaccessible
#                                 or the volume group is varied-off.
#       -n NodeList             List of nodes on which to execute command
#       -g ResourceGroup        Resource group whose node list will be used
#                                as the list of nodes on which to execute
#
#   The crfs specific arguments which are not supported are
#       -d  A new logical volume is always created
#       -n  This is not about NFS
#	-A  Always forced to "no - do not automount at system restart"
#
# Return Values:
#       0       success
#       1       failure
#
###############################################################################
#
: Include the PATH and PROGNAME initialization stuff
#
# @(#)69        1.8  src/43haes/usr/sbin/cluster/cspoc/plans/cl_path.cel, hacmp.cspoc, 61haes_r720, 1539B_hacmp720 9/10/15 13:28:25
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/cspoc/plans/cl_path.cel 1.8 
#  
# Licensed Materials - Property of IBM 
#  
# Restricted Materials of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1999,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
 ################################################################################
#   COMPONENT_NAME: CSPOC
#
# Name:
#       cl_path.cel
#
# Description:
#       C-SPOC Path Initialization Routine.  This routine is to be included
#       in all C-SPOC Execution Plans (e.g. '%include cl_path.cel').
#       it sets up the PATH environment variable to prevent hardcoding of 
#       path names in the CSPOC code.
#
# Arguments:
#       None.
#
# Return Values:
#	None.
#
# Environment Variables Defined:
#
#   PUBLIC:
#	PROGNAME Represents the name of the program 
#	HA_DIR Represents the directory the HA product is shipped under.
#
################################################################################
# Program name is the basename of the invoking script.
PROGNAME=${0##*/}
# Build PATH via the cluster utility so C-SPOC code need not hardcode paths.
PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
# set the HA_DIR env variable to the HA directory
HA_DIR="es"
# Set up useful prompt for when 'set -x' is turned on through _DEBUG
if [[ -n $_DEBUG ]] && (( $_DEBUG == 9 ))
then
    # Single quotes: PROGNAME/_CMD/LINENO expand when each trace line prints.
    PS4='${PROGNAME:-$_CMD}[$LINENO]: '
    set -x
fi
export PATH=$PATH:/usr/es/sbin/cluster/sa/sbin/
###############################################################################
#
# NAME: _RESTORE_STATE_AND_EXIT
#
# DESCRIPTION:
#      restores varyon state of vg on node and exits
#
# PARAMETERS:
#    $1 (the first command line parameter)
#       this is a numeric parameter which hints at how the overall program
#       should exit.  If it has a value of 0 and restoring the state is
#       successful, the overall program will exit with a 0 value.  Otherwise
#       it will exit with a value 1 greater than this parameter
#
# RETURN VALUES:
#      none -- this function does not return
#
# EXIT CODES:
#      This function generates an exit 0 if restoring the varyon state
#      was successful and the first parameter passed to this function is 0.
#      Otherwise, it returns the first parameter passed in plus 1
#      (i.e. ${1} + 1)
#
# GLOBAL VARIABLES:
#      global variables read by _vg_sync
#                  DVG
#                  CL_NODE
#                  VG_ACTIVE
#
###############################################################################
function _RESTORE_STATE_AND_EXIT
{
    # Put the volume group back into its original varyon state (always
    # attempted first), then leave.  A clean exit is possible only when both
    # the restore succeeded and the caller reports no prior errors ($1 == 0);
    # any other combination is announced as needing manual cleanup.
    if ! _vg_sync release
    then
        EXIT_WITH_CLEANUP_MSG           # restore failed -- bad news
    fi
    if (( $1 != 0 ))
    then
        EXIT_WITH_CLEANUP_MSG           # earlier errors -- bad news
    fi
    exit 0                              # state restored, nothing went wrong
}
###############################################################################
#
# NAME:  _IMPORT_RESTORE_EXIT
#
# DESCRIPTION:
#              Unlocks volume group on active node,
#              performs an importvg -L on other nodes
#              restores varyon state on active node to its initial state
#
# PARAMETERS:
#    $1 (the first command line parameter)
#       this is a numeric parameter which hints at how the overall program
#       should exit.  If it has a value of 0 and there are no failures after
#       this function is called, then the overall program will exit with a
#       0 value.  If it is called with a value greater than 0 the overall
#       program will exit with a value greater than 0 even if there are
#       no subsequent failures.
#
# RETURN VALUES:
#              None -- this function does not return
#
# EXIT CODES:
#      This function generates an exit 0 it was passed a 0 parameter and
#      there are no failures after entering this function.  If this function
#      is passed a value greater than 0, or there are failures after entering
#      this function, then this function will generate an exit with an exit
#      code greater than 0
#
# GLOBAL VARIABLES:
#      global variables read by _vg_sync:
#                  DVG
#                  CL_NODE
#                  VG_ACTIVE
#
#
###############################################################################
function _IMPORT_RESTORE_EXIT
{
    # Re-sync the volume group definition across the other nodes, fold any
    # sync failure into the status the caller handed us, then restore the
    # varyon state and exit through _RESTORE_STATE_AND_EXIT (never returns).
    integer rc=$1                       # status accumulated by the caller
    _vg_sync sync                       # push VG definition updates out
    rc=$((rc + $?))                     # note any new problems
    _RESTORE_STATE_AND_EXIT $rc
}
###############################################################################
#
# NAME:  EXIT_WITH_CLEANUP_MSG
#
# DESCRIPTION:
#              Prints an internationalized error message
#              and exits with exit code 1
#
# PARAMETERS:
#              None
#
# RETURN VALUES:
#              None -- this function does not return
#
# EXIT CODES:
#              1
#
# GLOBAL VARIABLES:
#              None referenced
#
#
###############################################################################
function EXIT_WITH_CLEANUP_MSG
{
    # Tell the operator that processing stopped because of errors and
    # that manual action may be needed to correct or complete the
    # changes, then terminate the command with exit code 1.
    nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 30 "Exiting due to errors.\n"
    nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 31 "User action required to correct or complete changes.\n"
    exit 1
}
###############################################################################
#
# NAME:	       is_mp_svg
#
# DESCRIPTION:
#              Check to see if the level of AIX on all cluster nodes is 
#	       capable of supporting mirror pools
#
# PARAMETERS:
#              None
#
# RETURN VALUES:
#              The value of $SVG_TYPE or ${SVG_TYPE}mp is written to stdout,
#	       depending on whether mirror pools can be supported
#
# EXIT CODES:
#              0
#
# GLOBAL VARIABLES:
#              SVG_TYPE - the volume group type being referenced; presumably '2'
#
###############################################################################
is_mp_svg()
{
    # Decide whether scalable volume groups on this cluster can use
    # mirror pools.  Prints either "$SVG_TYPE" (not supported) or
    # "${SVG_TYPE}mp" (supported) on stdout and always returns 0.
    #
    # Globals read: SVG_TYPE, G_NODES, try_out (output file set by cel_f1)
    # External:     cel_f1 runs $LSLPP_CMD on $NODE_LIST via C-SPOC (see
    #               the cel_f1 definition earlier in this file)
    integer V R M F
    typeset -Z2 R                       # two digit release
    typeset -Z3 M                       # three digit modification
    typeset -Z3 F                       # three digit fix
    integer VRMF=0
    typeset VG_type
    typeset LSLPP_CMD
    typeset NODE_LIST
    VG_type=${SVG_TYPE}"mp"             # assume OK till proven otherwise
    #
    :   First, check the already collected level of PowerHA across the
    :   cluster.  If its 7.1 or greater, the known AIX pre-req means
    :   that mirror pools are supported
    #
    if [[ -s /usr/es/sbin/cluster/etc/config/haver.info ]]
    then
        # NOTE: in ksh93 the last stage of a pipeline runs in the current
        # shell, so the VG_type assignment inside this while loop persists
        # after the loop -- this would not hold under bash.
        tail +2 /usr/es/sbin/cluster/etc/config/haver.info | \
            while read node ha_level
                do
                    # NOTE(review): '<= 7100' treats a node at exactly
                    # 7100 (7.1.0.0) as not guaranteeing support, though
                    # the narrative above says "7.1 or greater" qualifies;
                    # the fallback below then re-checks AIX levels, so the
                    # result is conservative -- confirm intended boundary.
                    if (( $ha_level <= 7100 ))
                    then
                        #
                        :       PowerHA is at level $ha_level on node $node
                        #
                        VG_type=$SVG_TYPE       # not guaranteed mirror pool support
                        break
                    fi
                done
    else
        VG_type=$SVG_TYPE                       # no version info to check
    fi
    #
    :   The check of the collected PowerHA levels shows some earlier than 7.1
    :   So, have to check the AIX level on each node
    #
    if [[ $VG_type == $SVG_TYPE ]]
    then
        VG_type=${SVG_TYPE}"mp"         # assume OK till proven otherwise
        #
        :   Scalable volume groups can support mirror pools only if all
        :   nodes on which the volume group is known are at 6.1 or better
        :   Check to see what is installed.
        #
        # Query the bos.rte.lvm fileset level on every node; cel_f1 leaves
        # the per-node results in the file named by $try_out.
        LSLPP_CMD=$(print -- 'lslpp -lcqOr bos.rte.lvm | cut -f3 -d:' | clencodearg)
        NODE_LIST=$(IFS=, set -- $G_NODES ; print "$*")
cel_f1
        #
        :   Process the output from that check - is any node at lower
        :   than 6.1.2?
        #
        # The zero-filled typesets above pack V.R.M.F into the integer
        # VRRMMMFFF, e.g. 6.1.2.0 -> 601002000.
        cut -f2 -d: $try_out | while IFS=. read V R M F
            do
                VRMF=$V$R$M$F
                if (( $VRMF < 601002000 ))
                then
                    #
                    :   This node is not at a level that supports
                    :   mirror pools
                    #
                    VG_type=$SVG_TYPE
                    break
                fi
            done
    fi
    #
    :   Print out either '2' or '2mp' to indicate whether mirror
    :   pools are supported
    #
    print $VG_type
    return 0
}
###############################################################################
# Start of main script
###############################################################################
# Initialize variables
_CMD_NAME=${0##*/}                  # basename of this C-SPOC command
_CSPOC_OPT_STR="d:f?[g:n:]"         # C-SPOC flags: -d lvl, -f; -g and -n are mutually exclusive
_MSET=49                            # message set for this command's catalog entries
_LVM_MSET=5                         # message set for shared LVM messages
integer TRY_RC=0                    # accumulates return codes from remote executions
# Usage: cl_crlvfs -cspoc "[-f] [-n Nodelist | -g ResourceGroup]"
#        -v VFS -g Volumegroup -m Mountpoint
#        [-u Mountgroup] [-t {yes|no}] [-p {ro|rw}]
#        [-l LogPartitions] -a size=Value [[-a Attribute=Value]...]
# NOTE(review): the usage summary above omits the -A {yes|no} flag that
# both _OPT_STR ("A:") and the _USAGE message below accept -- confirm.
_OPT_STR="v^g^m^u:A:t:p:l:a:F:"
_USAGE="$(dspmsg -s 49 cspoc.cat 1 'Usage: cl_crlvfs -cspoc \"[-f] [-g ResourceGroup | -n NodeList]\" -v Vfs -g Volumegroup -m Mountpoint [-u Mountgroup] [-A {yes|no}] [-t {yes|no}] [-p {ro|rw}] [-l Logpartitions] -a size=Value [[-a Attribute=Value]...]')"
# This script requires HA 5.5.0.0 or higher
_VER="5500"
_VERSION="5.5.0.0"
#
:   Include CELPP init code and verification routines.
#
#  ALTRAN_PROLOG_BEGIN_TAG                                                    
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                      
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog.
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/cspoc/plans/cl_init.cel 1.16.7.9 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1996,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/plans/cl_init.cel, 726, 2147A_aha726, Feb 05 2021 09:50 PM
################################################################################
#
# COMPONENT_NAME: CSPOC
#
# Name:
#       cl_init.cel
#
# Description:
#       C-SPOC Initialization Routine.  This routine is to be included
#       in all C-SPOC Execution Plans (e.g. '%include cl_init.cel').
#       It defines the ksh functions required to implement C-SPOC commands.
#
# Arguments:
#       None.
#
# Return Values:
#       None.
#
# Environment Variables Defined:
#
#   PUBLIC:
#       _OPT_STR            Specifies the list of valid command flags.
#                           Must be specified in the execution plan.
#
#       _CSPOC_OPT_STR      Specifies the list of valid CSPOC flags.
#                           Must be specified in the execution plan.
#
#       cspoc_tmp_log       Full path of the cspoc log file
#                           (/var/hacmp/log/cspoc.log).
#
#       _CLUSTER_NODES      A comma separated list of all nodes in the cluster.
#
#       _NODE_LIST          A comma separated list of nodes from the command
#                           line (i.e. Those specified by -n or implied by -g).
#
#       _TARGET_NODES       A comma separated list that specify the target
#                           nodes for a generated C-SPOC script.
#
#       BADNODES            A space-separated list that specifies the nodes
#                           that are either not defined in the cluster or not
#                           reachable for a generated C-SPOC script.
#
#       _RES_GRP            The resource group specified by -g on the
#                           command line
#
#       _SPOC_FORCE         Set to "Y" when -f specified.  Otherwise not set.
#
#       _DEBUG              Set to <debug_level> when -d specified.
#                           Otherwise not set.
#
#       _CMD_ARGS           The AIX Command Options and arguments from the
#                           C-SPOC command
#
#       _NUM_CMD_ARGS       The number of AIX Command Options and arguments
#                           from the C-SPOC command
#
#       _NON_FLG_ARGS       The non-flag arguments from the C-SPOC command.
#
#       _OF_NA              A list of the optional command flags specified
#                           that do NOT require an option argument.
#
#       _MF_NA              A list of the mandatory command flags specified
#                           that do NOT require an option argument.
#
#       _OF_WA              A list of the optional command flags specified
#                           that require an option argument.
#
#       _MF_WA              A list of the mandatory command flags specified
#                           that require an option argument.
#
#       _VALID_FLGS         A list of valid command flags.
#
#       _CSPOC_OPTS         The CSPOC Options specified on the command line
#                           following the '-cspoc' flag.
#
#       _CSPOC_OF_NA        A list of the optional CSPOC flags specified that
#                           do NOT require an option argument.
#
#       _CSPOC_MF_NA        A list of the mandatory CSPOC flags specified that
#                           do NOT require an option argument.
#
#       _CSPOC_OF_WA        A list of the optional CSPOC flags specified that
#                           require an option argument.
#
#       _CSPOC_MF_WA        A list of the mandatory CSPOC flags specified that
#                           require an option argument.
#
#       _CSPOC_VALID_FLGS   A list of valid CSPOC flags for this CSPOC command.
#
#       CLUSTER_OVERRIDE    Flag to Cluster Aware AIX Commands to signal that
#                           base AIX commands should be allowed to operate.
#                           Applies to 7.1.0 and later.
#
################################################################################
################################################################################
#
# _get_node_list
#
# DESCRIPTION:
#   Generates _CLUSTER_NODES, a comma separated list of all nodes in the cluster.
#
################################################################################
function _get_node_list
{
    # Build _CLUSTER_NODES, a comma separated list of every node defined
    # in the cluster configuration (HACMPnode ODM class).  Returns 1 and
    # prints an error if no nodes are configured.
    if [[ -n $_DEBUG ]]; then
        print "DEBUG: Entering _get_node_list version 1.16.7.9"
        if (( $_DEBUG >= 8 )); then
            typeset PROGNAME="_get_node_list"
            set -x
        fi
    fi
    typeset NODE IP_ADDR
    unset _CLUSTER_NODES
    #
    : GET A comma separated LIST OF ALL NODES IN THE CLUSTER
    #
    _CLUSTER_NODES=$(IFS=, set -- $(clodmget -q "object = COMMUNICATION_PATH" -f name -n HACMPnode) ; print "$*")
    if [[ -n $_DEBUG ]]; then
        print "DEBUG: CLUSTER NODES [${_CLUSTER_NODES}]"
        print "DEBUG: Leaving _get_node_list"
    fi
    #
    : ENSURE THAT NODES FOUND FOR THE CLUSTER
    #
    if [[ -n ${_CLUSTER_NODES} ]]; then
        return 0
    fi
    nls_msg -2 21 6 \
        "${_CMD}: The cluster does not appear to be configured - no nodes are defined.  \n  Configure the cluster, nodes and networks then try this operation again.\n" $_CMD
    return 1
} # End of "_get_node_list()"
################################################################################
#
# _get_target_nodes
#
# DESCRIPTION
#   Sets environment variable $_TARGET_NODES to the list of cluster nodes
#   on which the C-SPOC command is to be executed.
#
#	1 - If a node list was specified $_TARGET_NODES is set to
#	    the nodes listed.
#
#	2 - If a resource group was specified $_TARGET_NODES is set
#	    to the list of nodes that are participating in that
#	    resource group.
#
#	3 - If neither a node list or resource group has been specified
#	    then $_TARGET_NODES is set to a list of all nodes in the cluster.
#
################################################################################
function _get_target_nodes
{
    # Determine the set of nodes on which the C-SPOC command will run
    # and place it in $_TARGET_NODES as a comma separated list:
    #   1. the nodes given via -n, plus the nodes of the -g resource group
    #   2. otherwise every node in the cluster
    #   3. otherwise fail -- no target can be determined
    if [[ -n $_DEBUG ]]; then
        print "DEBUG: Entering _get_target_nodes version 1.16.7.9"
        if (( $_DEBUG >= 8 )); then
            typeset PROGNAME="_get_target_nodes"
            set -x
        fi
    fi
    typeset NODE=""
    typeset -i GTN_RC=-1
    if [[ -n $_NODE_LIST || -n $_RG_NODE_LIST ]]; then
        #
        : If given a node list, or the nodes in a resource group, use those
        #
        _TARGET_NODES=$(IFS=, set -- $_NODE_LIST $_RG_NODE_LIST ; print "$*")
        GTN_RC=0
    elif [[ -n $_CLUSTER_NODES ]]; then
        #
        : If no node list given, assume all cluster nodes, if we can find them
        #
        _TARGET_NODES="$_CLUSTER_NODES"
        GTN_RC=0
    else
        #
        : Else cannot figure out where to run this
        #
        nls_msg -2 -l ${cspoc_tmp_log} 4 6 \
        "%s: Unable to determine target node list!\n" "$_CMD"
        GTN_RC=1
    fi
    return $GTN_RC
} # End of "_get_target_nodes()"
################################################################################
#
# _get_rgnodes
#
# DESCRIPTION
#   Gets a list of nodes associated with the resource group specified.
#
################################################################################
function _get_rgnodes
{
    # Look up the nodes participating in resource group $1 and place them
    # in $_RG_NODE_LIST.  Returns 1 when no group name was supplied or the
    # group does not exist in the HACMPgroup ODM class, 0 otherwise.
    if [[ -n $_DEBUG ]]; then
        print "DEBUG: Entering _get_rgnodes version 1.16.7.9"
        if (( $_DEBUG >= 8 )); then
            typeset PROGNAME="_get_rgnodes"
            set -x
        fi
    fi
    if [[ -z $1 ]]; then
        nls_msg -2 -l ${cspoc_tmp_log} 4 9 \
            "%s: _get_rgnodes: A resource group must be specified.\n" "$_CMD"
        return 1
    fi
    _RG_NODE_LIST=$(clodmget -q "group = $1" -f nodes -n HACMPgroup)
    if [[ -n $_RG_NODE_LIST ]]; then
        return 0
    fi
    nls_msg -2 -l ${cspoc_tmp_log} 4 50 \
        "%s: Resource group %s not found.\n" "$_CMD" "$1"
    return 1
} # End of "_get_rgnodes()"
#######################################################################
#
# _getopts
#
# DESCRIPTION
#   Parses command line options for C-SPOC commands.
#
#######################################################################
#
# OPTION STRING
#   The _getopts() routine requires the execution plan to define the
#   environment variable $_OPT_STR which is referred to as the option
#   string.  The option string is used to define valid and/or required
#   flags, the required number of non-flag arguments, and what flags
#   may or may not be specified together.
#
#    Operator   Description                                  Example
#    --------   ------------------------------------------   ---------
#	()	Groups mutually required flags               (c!d:)
#	[]	Groups mutually exclusive flags              [f,b,]
#
#	?	Optional flag (default)                      b?
#	!	Mandatory flag                               c!
#
#	:	Optional flag that requires an argument      d:
#	^	Mandatory flag that requires an argument     e^
#
#	.	Optional multi-byte flag
#	,	Mandatory multi-byte flag                    f,
#
#	+N	Indicates that N non-flag arguments are.     +2
#               required. It must be at the beginning of
#               the option string.
#
#   Notes:
#	1 - A flag that can be specified with or without an argument
#           would be specified twice as follows: _OPT_STR="a?a:"
#
#	2 - A flag that requires an argument cannot also be the first
#           letter of a multi-byte flag.  (i.e. -b arg -boot ) as there
#           is no way to differentiate between the two.
#
#  Example:
#    The following option string would correspond to the usage below
#    In the usage '[]' indicates optional flags and '()' indicates
#    grouping.
#
#	_OPT_STR="+2ab?(c!d:)e^[f,b,]g."
#
#    Usage:
#     cmd [-a] [-b] -c [-d arg] -e arg ( -foo | -bar ) [-go] arg1 arg2 [arg3]
#
#
#######################################################################
function _getopts
{
    # Parse the C-SPOC flags (per $_CSPOC_OPT_STR) and the AIX command
    # flags (per $_OPT_STR - see the operator table in the block comment
    # above).  Populates, among others, _CMD_ARGS, _NON_FLG_ARGS,
    # _NUM_CMD_ARGS, _NODE_LIST, _RES_GRP, _SPOC_FORCE and _CSPOC_QUIET.
    # Returns 0 on success, 2 on a usage error (exits 2 for some
    # invalid-flag cases).
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _getopts 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_get_opts"
            set -x
        fi
    fi
    typeset CMD=${0##*/}
    # unset the following variables to avoid these variables being
    # influenced implicitly by external environment. Note that we will
    # not unset/touch _DEBUG since it is being checked even before hitting
    # this part of the code. i.e. depending upon the _DEBUG flag we set
    # set -x option initially itself.
    unset _NODE_LIST
    unset _RES_GRP
    unset _CSPOC_QUIET
    # LOCAL VARIABLES
    typeset _OPT_STR _CSPOC_OPT_STR OPT X Y
    typeset _VALID_FLGS _CSPOC_VALID_FLGS
    typeset _OF_NA _MF_NA _OF_WA _MF_WA
    typeset _CSPOC_OF_NA _CSPOC_MF_NA _CSPOC_OF_WA _CSPOC_MF_WA
    typeset _GOPT=no _NOPT=no
    # THE FIRST TWO ARGS MUST BE OPTION STRINGS
    _CSPOC_OPT_STR=$1
    _OPT_STR=$2
    shift 2
    # CHECK CSPOC OPT STRING SPECIFIED IN THE EXECUTION PLAN
    # FOR OPTIONAL OR REQUIRED FLAGS
    [[ $_CSPOC_OPT_STR == *g^* ]] && _GOPT=req
    [[ $_CSPOC_OPT_STR == *g:* ]] && _GOPT=opt
    [[ $_CSPOC_OPT_STR == *n^* ]] && _NOPT=req
    [[ $_CSPOC_OPT_STR == *n:* ]] && _NOPT=opt
    # CHECK IF THE OPTION STRINGS SPECIFY A REQUIRED NUMBER OF NON-FLAG ARGS
    # (A LEADING "+N" -- NOTE THIS ONLY SUPPORTS A SINGLE DIGIT N)
    if [[ $_OPT_STR == +* ]]
    then
        X=${_OPT_STR#??}
        Y=${_OPT_STR%"$X"}
        _OPT_STR=$X
        _NUM_ARGS_REQ=${Y#?}
    fi
    # PARSE THE OPTION STRING ($_OPT_STR) INTO FIVE LISTS
    #  ${_OF_NA} is a list of optional flags that DO NOT take an option arg.
    #  ${_MF_NA} is a list of mandatory flags that DO NOT take an option arg.
    #  ${_OF_WA} is a list of optional flags that DO take an option argument
    #  ${_MF_WA} is a list of mandatory flags that DO take an option argument
    #  ${_VALID_FLGS} is a list of all valid flags.
    # Note that both strings start and end with a space (to facilitate grepping)
    # and contain a list of space separated options each of which is preceded
    # by a minus sign.
    # THE FOLLOWING WHILE LOOP SIMPLY ORGANIZES THE VALID FLAGS INTO
    # FOUR LISTS THAT CORRESPOND TO THE FOUR FLAG TYPES LISTED ABOVE
    # AND A FIFTH LIST THAT INCLUDES ALL VALID FLAGS.
    X=${_OPT_STR}
    [[ $X == '-' ]] && X=""
    while [[ -n ${X} ]]
    do
        # GET THE NEXT LETTER OF THE OPTION STRING
        Y=${X#?}
        OPT=${X%"$Y"}
        X=${Y}
        # CHECK FOR AND PROCESS MUTUALLY REQUIRED OR MUTUALLY EXCLUSIVE FLAGS
        case $OPT in
            '(') # STARTS A GROUP OF MUTUALLY REQUIRED FLAGS
                 if [[ -n $MUTREQ ]]
                 then
                     print '$_CMD: _getopts: Invlid format for $_OPT_STR'
                     print '$_CMD: _getopts: Unexpected character "("'
                     return 1
                 fi
                 MUTREQ=Y
                 continue
            ;;
            ')') # ENDS A GROUP OF MUTUALLY REQUIRED FLAGS
                 if [[ -z $MUTREQ ]]
                 then
                     print '$_CMD: _getopts: Invlid format for $_OPT_STR'
                     print '$_CMD: _getopts: Unexpected character ")"'
                     return 1
                 fi
                 MUTREQ=""
                 MUTREQ_FLAGS=$MUTREQ_FLAGS" "
                 continue
            ;;
            '[') # STARTS A GROUP OF MUTUALLY EXCLUSIVE FLAGS
                 if [[ -n $MUTEX ]]
                 then
                     print '$_CMD: _getopts: Invlid format for $_OPT_STR'
                     print '$_CMD: _getopts: Unexpected character "["'
                     return 1
                 fi
                 MUTEX=Y
                 continue
            ;;
            ']') # ENDS A GROUP OF MUTUALLY EXCLUSIVE FLAGS
                 if [[ -z $MUTEX ]]
                 then
                     print '$_CMD: _getopts: Invlid format for $_OPT_STR'
                     print '$_CMD: _getopts: Unexpected character "]"'
                     return 1
                 fi
                 MUTEX=""
                 MUTEX_FLAGS=$MUTEX_FLAGS" "
                 continue
            ;;
        esac
        # KEEP A LIST OF MUTUALLY EXCLUSIVE FLAGS
        if [[ -n $MUTEX && $MUTEX_FLAGS != *${OPT}* ]]; then
            MUTEX_FLAGS=${MUTEX_FLAGS}${OPT}
        fi
        # KEEP A LIST OF MUTUALLY REQUIRED FLAGS
        if [[ -n $MUTREQ && $MUTREQ_FLAGS != *${OPT}* ]]; then
            MUTREQ_FLAGS=${MUTREQ_FLAGS}${OPT}
        fi
        # KEEP A LIST OF ALL VALID FLAGS
        _VALID_FLGS="${_VALID_FLGS} -$OPT"
        # DETERMINE THE FLAG TYPE AS DESCRIBED ABOVE
        # ADD THE FLAG TO THE APPROPRIATE LIST AND
        # STRIP OFF THE FLAG TYPE IDENTIFIER FROM
        # THE OPTION STRING '${_OPT_STR}'.
        case $X in
            '.'*) # OPTIONAL MULTI-BYTE FLAG
                  X=${X#.}
                  _OF_MB="${_OF_MB} -$OPT"
            ;;
            ','*) # MANDATORY MULTI-BYTE FLAG
                  X=${X#,}
                  _MF_MB="${_MF_MB} -$OPT"
            ;;
            ':'*) # OPTIONAL FLAG THAT REQUIRES AN ARGUMENT
                  X=${X#:}
                  _OF_WA="${_OF_WA} -$OPT"
            ;;
            '^'*) # MANDATORY FLAG THAT REQUIRES AN ARGUMENT
                  X=${X#^}
                  _MF_WA="${_MF_WA} -$OPT"
            ;;
            '!'*) # MANDATORY FLAG
                  X=${X#!}
                  _MF_NA="${_MF_NA} -$OPT"
            ;;
            '?'*) # OPTIONAL FLAG
                  X=${X#?}
                  _OF_NA="${_OF_NA} -$OPT"
            ;;
            *)    # OPTIONAL FLAG
                  _OF_NA="${_OF_NA} -$OPT"
            ;;
        esac
    done # End of the option "while" loop
    # TACK A SPACE ONTO THE END OF EACH LIST TO MAKE OPTION GREPPING SIMPLE
    _VALID_FLGS=$_VALID_FLGS" "
    _OF_NA=$_OF_NA" " ; _OF_WA=$_OF_WA" " ; _OF_MB=$_OF_MB" "
    _MF_NA=$_MF_NA" " ; _MF_WA=$_MF_WA" " ; _MF_MB=$_MF_MB" "
    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 3 )) 
    then
        print "DEBUG(3): _OF_NA=$_OF_NA"
        print "DEBUG(3): _MF_NA=$_MF_NA"
        print "DEBUG(3): _OF_WA=$_OF_WA"
        print "DEBUG(3): _MF_WA=$_MF_WA"
        print "DEBUG(3): _OF_MB=$_OF_MB"
        print "DEBUG(3): _MF_MB=$_MF_MB"
        print "DEBUG(3): _VALID_FLGS=$_VALID_FLGS"
    fi
    # PARSE THE COMMAND LINE ARGS
    let _NUM_CMD_ARGS=0
    while [[ -n $* ]]
    do
        # SPLIT THE CURRENT WORD INTO A TWO-CHARACTER FLAG CANDIDATE
        # (E.G. "-x") AND WHATEVER FOLLOWS IT IN THE SAME WORD.
        THIS_FLAG=$1
        THIS_ARG=${THIS_FLAG#??}
        THIS_FLAG=${THIS_FLAG%"$THIS_ARG"}
        if [[ -n $_DEBUG ]]
        then
            print "THIS_FLAG=\"$THIS_FLAG\""
            print "THIS_ARG=\"$THIS_ARG\""
        fi
        if [[ $1 == '-cspoc' ]]
        then
            #
            :   Check for and process any CSPOC flags
            #
		_CSPOC_OPTS=$2
            if [[ -z $_CSPOC_OPTS || $_CSPOC_OPTS == *([[:space:]]) ]]
            then
                SHIFT=1
            else
                SHIFT=2
                # Leading ':' selects silent error reporting; 'd#' is the
                # ksh getopts extension for a numeric option argument.
                while getopts ':fd#n:?g:q' _CSPOC_OPTION $_CSPOC_OPTS 
                do
                    case $_CSPOC_OPTION in
                        f ) :   Force option
                            export _SPOC_FORCE=Y
                        ;;
                        d ) :   Debug level
                            export _DEBUG=$OPTARG
                        ;;
                        n ) :   Target node list
                            export _NODE_LIST=$(print $OPTARG | sed -e"s/['\"]//g")
                        ;;
                        g ) :   Target resource group 
                            export _RES_GRP=$(print $OPTARG | sed -e"s/['\"]//g")
                        ;;
                        q ) :   Suppress output to stdout
                            export _CSPOC_QUIET=YES
                        ;;
                        : ) :   Missing operand - ignored
                        ;;
                        * ) :   Invalid flag specified
                            nls_msg -2 -l ${cspoc_tmp_log} 4 13 \
                                "%s: Invalid C-SPOC flag [%s] specified.\n" \
                                "$_CMD" "$_CSPOC_OPTION"
                            print "$_USAGE"
                            exit 2
                        ;;
                    esac
                done
            fi
            #
            :   Validate required and mutually exclusive CSPOC operands
            #
            if [[ $_GOPT == "no" && -n $_RES_GRP ]]
            then
                #
                :   Is "-g" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 60 \
                    "%s: C-SPOC -g flag is not allowed for this command.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_NOPT == "no" && -n $_NODE_LIST ]]
            then
                #
                :   Is "-n" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 61 \
                    "%s: C-SPOC -n flag is not allowed for this command.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_GOPT == "req" && $_NOPT == "req" ]] && \
                 [[ -z $_RES_GRP && -z $_NODE_LIST ]]
            then    
                #
                :   Check for "-g" or "-n" present when one
                :   or the other is required
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 62 \
                    "%s: Either the '-g' or the '-n' C-SPOC flag must be specified.\n" "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ -n $_RES_GRP && -n $_NODE_LIST ]]
            then
                #
                :   Check that both "-g" and "-n" are not specified together
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 63 \
                    "%s: C-SPOC -g and -n flags are mutually exclusive.\n" \
                    "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_NOPT != "req" && $_GOPT == "req" && -z $_RES_GRP ]]
            then
                #
                :   Is only "-g" allowed
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 64 \
                    "%s: C-SPOC -g flag is required.\n" "$_CMD"
                print "$_USAGE"
                return 2
            elif [[ $_GOPT != "req" && $_NOPT == "req" && -z $_NODE_LIST ]]
            then
                #
                :   Is only "-n" required
                #
                nls_msg -2 -l ${cspoc_tmp_log} 4 65 \
                    "%s: C-SPOC -n flag is required.\n" "$_CMD"
                print "$_USAGE"
                return 2
            fi
            shift $SHIFT
        elif [[ "$THIS_FLAG" != -* ]]
        then
            #  AIX COMMAND ARGUMENT THAT IS NOT AN OPTION FLAG
            #  NEED TO ACCOMMODATE OPTIONS THAT MAY OR MAY NOT HAVE AN ARGUMENT.
            #  IF OPT_ARG DOESN'T START WITH A '-' ITS AN ARGUMENT OTHERWISE
            #  CONSIDER IT TO BE THE NEXT OPTION
            let _NUM_CMD_ARGS=$_NUM_CMD_ARGS+$#
            TMP_FLAG=""
            while (( $# > 0 ))
            do
                case "$1" in
                    -*) TMP_FLAG=$(echo $1 | cut -c1-2)
                        _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$TMP_FLAG"
                        TMP_ARG1=$(echo $1 | cut -c3-)
                        if [[ -n $TMP_ARG1 ]] 
                        then
                            TMP_ARG1="$(print -- $TMP_ARG1 |\
                                        clencodearg $_ENCODE_ARGS)"
                            _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$TMP_ARG1"
                            TMP_FLAG=""
                        fi
                    ;;
                    *) TMP_ARG2="$(print -- $1 | clencodearg $_ENCODE_ARGS)"
                       _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}${TMP_ARG2}
                       if [[ -z $TMP_FLAG ]]
                       then
                           _NON_FLG_ARGS=${_NON_FLG_ARGS:+"${_NON_FLG_ARGS} "}"${TMP_ARG2}"
                       fi
                       TMP_FLAG=""
                esac
                shift
            done
            break
        else	# COME INTO HERE WITH $THIS_FLAG and $THIS_ARG SET
            ARG_CHECK=Y
            ARG_NEXT=""
            while [[ -n $ARG_CHECK ]]
            do
                # NOW CHECK IF WE STILL HAVE MORE FLAGS TO PROCESS
                [[ -z $THIS_ARG ]] && ARG_CHECK=""
                if print -- "$_OF_MB $_MF_MB" | grep -- "$THIS_FLAG" > /dev/null
                then
                    # THIS IS A MULTI-BYTE FLAG
                    if [[ -z $THIS_ARG ]]
                    then
                        ( print -- "$_OF_NA $_MF_NA" | grep -- "$THIS_FLAG" > /dev/null ) || \
                        {
                            # THIS FLAG REQUIRES AN ARGUMENT
                            nls_msg -2 -l ${cspoc_tmp_log} 4 19 \
                                "%s: Invalid option [%s].\n" "$_CMD" "$1"
                            print "$_USAGE"
                            exit 2
                        }
                    fi
                    # VALID AIX COMMAND MULTI-BYTE OPTION (WITHOUT AN ARGUMENT)
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG$THIS_ARG"
                    shift
                    ARG_CHECK=""	# Disable further processing of $THIS_ARG as flags
                elif print -- "$_OF_WA $_MF_WA" | grep -- "$THIS_FLAG" > /dev/null
                then
                    # THIS IS A FLAG THAT REQUIRES AN ARGUMENT
                    # HANDLE OPTIONAL SPACE BETWEEN FLAG AND ITS ARG
                    if [[ -z $THIS_ARG && -z $ARG_NEXT ]]
                    then
                        THIS_ARG=$2		# THERE WAS A SPACE
                        SHIFT=2
                    else
                        SHIFT=1		# THERE WAS NO SPACE
                    fi
                    # NOW VALIDATE THAT WE HAVE AN ARG AND THAT IT IS VALID
                    if [[ -z $THIS_ARG || $THIS_ARG == -* ]]
                    then
                        # IF THERE IS NO ARG THEN CHECK IF FLAG MAY BE SPECIFIED WITHOUT ONE
                        print -- "$_OF_NA $_MF_NA" | grep -q -- "$THIS_FLAG" ||\
                        {
                            # THIS FLAG REQUIRES AN ARGUMENT
                            nls_msg -2 -l ${cspoc_tmp_log} 4 19 \
                            "%s: Option [%s] requires an argument.\n" "$_CMD" "$1"
                            print "$_USAGE"
                            exit 2
                        }
                    fi
                    # VALID AIX COMMAND OPTION WITH AN ARGUMENT
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG $(print -- $THIS_ARG | clencodearg $_ENCODE_ARGS)"
                    shift $SHIFT
                    # Disable further processing of $THIS_ARG as flags
                    ARG_CHECK=""
                elif print -- "$_OF_NA $_MF_NA" | grep -q -- "$THIS_FLAG"
                then
                    # THIS IS A FLAG THAT DOES NOT TAKE AN ARGUMENT
                    _CMD_ARGS=${_CMD_ARGS:+"${_CMD_ARGS} "}"$THIS_FLAG"
                    # IF THIS FLAG WAS OBTAINED FROM $THIS_FLAG THEN WE WANT TO
                    # SHIFT. IF IT WAS OBTAINED FROM $THIS_ARG THEN WE DONT
                    [[ -z $ARG_CHECK ]] && shift
                    # THIS FLAG DOES NOT TAKE AN OPTION ARGUMENT SO ASSUME
                    # THAT "$THIS_ARG" SPECIFIES MORE FLAGS TO PROCESS.
                    if [[ -n $THIS_ARG ]]
                    then
                        # GET THE NEXT FLAG, ADJUST $THIS_ARG,
                        # AND KEEP PROCESSING.
                        X=${THIS_ARG#?}
                        THIS_FLAG="-${THIS_ARG%$X}"
                        THIS_ARG=$X
                        ARG_NEXT=Y
                    fi
                else
                    nls_msg -2 -l ${cspoc_tmp_log} 4 26 \
                    "%s: Invalid option [%s].\n" "$_CMD" "$1"
                    print "$_USAGE"
                    exit 2
                fi
            done
        fi
    done
    ##
    # PERFORM CHECKING OF THE AIX COMMAND FLAGS
    ##
    # CHECK FOR REQUIRED NUMBER OF NON-FLAG ARGUMENTS
    if (( ${_NUM_CMD_ARGS:-0} < ${_NUM_ARGS_REQ:-0} ))
    then
        nls_msg -2 -l ${cspoc_tmp_log} 4 27 \
            "%s: Missing command line arguments.\n" "$_CMD"
        print "$_USAGE"
        return 2
    fi
    # THIS IS WHERE WE CHECK FOR MANDATORY FLAGS, MUTUALLY EXCLUSIVE FLAGS,
    # AND MUTUALLY REQUIRED FLAGS
    # CHECK FOR MUTUALLY REQUIRED FLAGS
    # FOR EACH GROUP OF FLAGS SPECIFIED IN $MUTREQ_FLAGS WE WILL COUNT HOW
    # MANY WE NEED AND HOW MANY ARE GIVEN ON CMD LINE.  IF THESE VALUES ARE
    # NOT EQUAL PRINT AN ERROR AND RETURN NON-ZERO
    typeset -i CNT=0 N=0
    for GROUP in $MUTREQ_FLAGS
    do
        # GET A COUNT OF HOW MANY FLAGS IN THIS GROUP
        print -n $GROUP | wc -c | read N
        integer CNT=0
        F=""
        while [[ -n $GROUP ]]
        do
            # GET THE NEXT FLAG IN THE GROUP
            A=${GROUP#?}
            B=${GROUP%"$A"}
            GROUP=$A
            # IF THIS FLAG IS USED INCREMENT THE COUNTER
            if [[ "$(print -- $_CMD_ARGS | grep -- '-'${B})"' ' != ' ' ]]
            then
                (( CNT = CNT + 1 ))
            fi
            F=${F:+"$F, "}"-"$B
        done
        # VERIFY THAT THE COUNTER EQUALS THE TOTAL NUMBER OF FLAGS IN THE GROUP
        if (( $CNT != $N ))
        then
            print "$_CMD: One or more flags [$F] were not specified."
            print "$_CMD: Specifying any one of these flags requires the others."
            return 2
        fi
    done
    # CHECK FOR MUTUALLY EXCLUSIVE FLAGS
    # FOR EACH GROUP OF FLAGS SPECIFIED IN $MUTEX_FLAGS WE WILL COUNT HOW
    # MANY ARE GIVEN ON CMD LINE.  IF MORE THAN ONE IS GIVEN THEN PRINT
    # AN ERROR AND RETURN NON-ZERO
    for GROUP in $MUTEX_FLAGS
    do
        # GET A COUNT OF HOW MANY FLAGS IN THIS GROUP
        integer CNT=0
        F=""
        while [[ -n $GROUP ]]
        do
            # GET THE NEXT FLAG IN THE GROUP
            A=${GROUP#?}
            B=${GROUP%"$A"}
            GROUP=$A
            # IF THIS FLAG IS USED INCREMENT THE COUNTER
            if [[ -n "$(print -- $_CMD_ARGS | grep -- '-'${B})" ]]
            then
                (( CNT = CNT + 1 ))
            fi
            F=${F:+"$F, "}"-"$B
        done
        # VERIFY THAT AT MOST ONE FLAG OF THE GROUP WAS GIVEN
        if (( $CNT > 1 ))
        then
            print "$_CMD: The flags [$F] are mutually exclusive."
            print "$_CMD: Only one of these flags may be specified."
            return 2
        fi
    done
    # CHECK FOR ALL MANDATORY FLAGS
    for X in $_MF_NA $_MF_WA
    do
        # CHECK THAT MANDATORY FLAG IS ON COMMAND LINE
        if [[ -z "$(print -- $_CMD_ARGS | grep -- ${X})" ]]
        then
            # THE FLAG WAS NOT SPECIFIED SO WE MUST FIRST CHECK IF ANOTHER
            # FLAG WAS SPECIFIED THAT IS MUTUALLY EXCLUSIVE WITH THIS ONE.
            for GROUP in $MUTEX_FLAGS
            do
                OK=""
                while [[ -n $GROUP ]]
                do
                    Y=${GROUP#?}
                    Z=${GROUP%"$Y"}
                    GROUP=$Y
                    print -- " $_CMD_ARGS " |\
                        grep -- "-${Z} " > /dev/null && OK=Y
                done
                [[ -n $OK ]] && break
            done
            # "$OK" IS NULL IF NO FLAG IN THIS MUTEX GROUP WAS GIVEN
            if [[ -z $OK ]]
            then
                nls_msg -2 -l ${cspoc_tmp_log} 4 29 \
                "%s: Mandatory option [%s] not specified.\n" "$_CMD" "$X"
                print "$_USAGE"
                return 2
            fi
        fi
    done
    if [[ -n $_DEBUG ]] && (( $_DEBUG >= 3 )) 
    then
        print -- "DEBUG(3): _CMD_ARGS=$_CMD_ARGS"
    fi
    return 0
} # End of "_getopts()"
################################################################################
#
# DESCRIPTION:
#   Updates the C-SPOC logfile
#
################################################################################
################################################################################
# cexit: EXIT-trap handler.  Appends the per-command temporary log to the
# permanent cspoc.log (located via the HACMPlogs ODM, with a documented
# fallback) and, on success, removes the temporary work files.
#
# Arguments:
#   $1 - temporary log file accumulated during this C-SPOC command
#   $2 - exit status of the command being logged
# Returns: 0 normally; 1 on usage error or unreadable temp log
################################################################################
function cexit
{
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering cexit version 1.16.7.9"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME=cexit
            set -x
        fi
    fi
    typeset USAGE="USAGE: cexit <temp_log_file> <return_code>"
    # CHECK USAGE - return instead of falling through, so the arithmetic
    # test on $RC below never operates on an empty string
    (( $# != 2 )) && {
        print "$_CMD: $USAGE"
        return 1
    }
    typeset TEMP_LOG=$1
    typeset RC=$2
    #
    : Read the HACMPlogs ODM for the pathname of the cspoc.log log file
    : If the ODM is empty or corrupted, use /var/hacmp/log/cspoc.log
    #
    DESTDIR=$(clodmget -q "name = cspoc.log" -f value -n HACMPlogs)
    if [[ -n $DESTDIR ]]
    then
        CSPOC_LOG="$DESTDIR/cspoc.log"
    else
        dspmsg scripts.cat 463 "The cluster log entry for %s could not be found in the HACMPlogs ODM.\n" "cspoc.log"
        dspmsg scripts.cat 464 "Defaulting to log directory %s for log file %s.\n" "/var/hacmp/log" "cspoc.log"
        CSPOC_LOG="/var/hacmp/log/cspoc.log"
    fi
    #
    : CHECK ARGS
    #
    if [[ ! -f ${TEMP_LOG} ]]
    then
        # Command name first, file name second, to match the default
        # format string "%s: Unable to open file: %s"
        nls_msg -2 -l ${CSPOC_LOG} 4 39 \
            "%s: Unable to open file: %s\n" "$_CMD" "${TEMP_LOG}"
        return 1
    fi
    #
    :  If the log file does not exist, create it.
    #
    if [[ ! -f ${CSPOC_LOG} ]]; then
        touch ${CSPOC_LOG}
    fi
    #
    :  Keep the information in the log file if we have write permission
    #
    if [[ -w $CSPOC_LOG ]]
    then
        cat ${TEMP_LOG} >> $CSPOC_LOG
    fi
    #
    :  On success, clean up the temporary work files, unless running at a
    :  debug level high enough to want them kept for problem determination
    #
    if (( $RC == 0 )) && ( [[ -z $_DEBUG ]] || (( $_DEBUG <= 8 )) ) then
        rm -f ${TEMP_LOG%_*}*
        rm -f /tmp/cel$$_s*.err
        rm -f /tmp/cel$$_s*.out
        rm -f /tmp/cel$$.cache
    fi
} # End of "cexit()"
################################################################################
#
# _cspoc_verify - Performs verification of a number of CSPOC requirements.
#                 Certain requirements, if not met, produce a hard error
#                 and the routine produces an immediate exit of the script.
#                 Other requirements, if not met, produce soft errors that
#                 result in the routine returning a value of '1'.  The
#                 calling script will then exit unless the CSPOC force flag
#                 has been set.
#
################################################################################
function _cspoc_verify
{
    # Hard errors (no usable target nodes) exit 1 directly; soft errors set
    # _RETCODE=1 so the caller can decide based on the C-SPOC force flag.
    if [[ -n $_DEBUG ]]
    then
        print "DEBUG: Entering _cspoc_verify version 1.16.7.9 + 20527,842,758"
        if (( $_DEBUG >= 8 ))
        then
            typeset PROGNAME="_cspoc_verify"
            set -x
        fi
    fi
    typeset NODE 
    typeset bad_targets		#   space separated list of unreachable nodes
    typeset CAA_down_nodes	#   target hosts CAA says are down
    typeset CAA_node_name	#   CAA host node name
    integer _RETCODE=0		#   Assume OK until proven otherwise
    typeset BADNODES		#   Space separated list of invalid nodes
    typeset down_ha_nodes	#   target HA nodes CAA says are down
    typeset good_targets	#   target HA nodes that should work
    typeset bad_level_nodes	#   target HA nodes below minimum release level
    if [[ $_CSPOC_CALLED_FROM_SMIT != 'true' ]]
    then
	#
	:   If not called from SMIT, which will surely set things
	:   up correctly, check to make sure target nodes are valid.
	#
        for NODE in $(IFS=, set -- $_TARGET_NODES ; print $*)	
	do
	    #
	    :   Collect a list of given nodes that do not
	    :   show up in the local cluster definition.
	    #
	    # _CLUSTER_NODES is a comma separated list; the extended pattern
	    # matches NODE as a whole comma-delimited element
	    if [[ $_CLUSTER_NODES != @(?(*,)$NODE?(,*)) ]]
	    then
		BADNODES=${BADNODES:+$BADNODES" "}$NODE
		nls_msg -2 -l ${cspoc_tmp_log} 4 44 \
		"%s: The node [%s] is not a part of this cluster.\n" "$_CMD" "$NODE"
	    fi
	done
	if [[ -n $BADNODES ]]
	then
	    #
	    :	Remove any invalid node names from the node list
	    #
	    save_targets=""
	    for ha_node in $(IFS=, set -- $_TARGET_NODES ; print $*)
	    do
		if [[ $BADNODES != @(?(* )${ha_node}?( *)) ]]
		then
		    save_targets=${save_targets:+"${save_targets},"}${ha_node}
		fi
	    done
	    _TARGET_NODES=$save_targets
	    if [[ -z $_TARGET_NODES ]]
	    then
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will not be run because all of the target nodes, %s, are not part of this cluster\n" "$_CMD" $LINENO "$BADNODES"
		exit 1	    #	No valid nodes found
	    else
		_RETCODE=1  #	Continue if 'forced' specified
	    fi
	fi
    fi
    cluster_version=$(clodmget -f cluster_version -n HACMPcluster)
    if [[ -x /usr/lib/cluster/incluster ]] && /usr/lib/cluster/incluster || \
       (( $cluster_version >= 15 )) 
    then
	#
	:   If at a level where CAA is in place, check to see if
	:   CAA can provide information on the state of nodes.
	#
	# NOTE(review): the pipeline below feeds a while-read loop; this
	# relies on ksh running the last pipeline stage in the current shell
	# so CAA_down_nodes/down_ha_nodes survive the loop (bash would lose
	# them in a subshell).
	LC_ALL=C lscluster -m 2>/dev/null | \
	egrep 'Node name:|State of node:' | \
	cut -f2 -d: | \
	paste -d' ' - - | \
	while read CAA_node_name state
	do
	    if [[ -n $CAA_node_name ]]
	    then
		if [[ $state != 'UP' && \
		    $state != @(?(* )NODE_LOCAL?( *)) && \
		    $state != @(?(* )REACHABLE THROUGH REPOS DISK ONLY?( *)) &&  \
		    $state != 'DOWN  STOPPED' ]]
		then
		    #
		    #	The purpose of this check is to avoid long timeouts
		    #	trying to talk to a node known to be dead.
		    #	- The local node is always reachable
		    #	- A stopped node may be reachable; halevel checks below
		    #	- A node reachable only through the repository disk
		    #	  may be reachable: just because CAA declares the 
		    #	  network to be down doesn't mean clcomd can't get 
		    #	  through; hlevel checks below
		    #
		    :   Node $CAA_node_name is 'DOWN' 
		    #
		    CAA_down_nodes=${CAA_down_nodes:+"${CAA_down_nodes} "}${CAA_node_name}
		    #
		    :   Find the PowerHA node name corresponding to the
		    :   $CAA_node_name - the name must be a label on an
		    :   interface on some node.
		    #
		    # host(1) output field 3 may carry a trailing comma;
		    # strip it before the IPv4/IPv6 shape check below
		    host_ip=$(LC_ALL=C host $CAA_node_name | cut -f3 -d' ')
		    host_ip=${host_ip%,}
		    if [[ -n $host_ip && $host_ip == @(+([0-9.])|+([0-9:])) ]]
		    then
			down_ha_node=$(clodmget -q "identifier = ${host_ip}" -f nodename -n HACMPadapter)
			if [[ -n $down_ha_node ]] 
			then
			    down_ha_nodes=${down_ha_nodes:+"$down_ha_nodes "}${down_ha_node}
			    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
			    "%s[%d]: The CAA lscluster command indicates that node %s[%s] is \"%s\" and not active.\n" "$_CMD" $LINENO $down_ha_node $CAA_node_name "$state"
			fi
		    fi
		fi
	    fi
	done
    fi
    if [[ -n $down_ha_nodes ]]
    then
	#
	:   CAA says that nodes $down_ha_nodes are not active
	:   Construct a list of the remaining nodes, to use to
	:   check to see if clcomd is running.
	#
	for ha_node in $(IFS=, set -- $_TARGET_NODES ; echo $* )
	do
	    if [[ $down_ha_nodes != @(?(* )${ha_node}?( *)) ]]
	    then
		good_targets=${good_targets:+"${good_targets} "}${ha_node}
	    fi
	done
    else
	#
	:   CAA gives no reason to suspect nodes are not reachable
	#
	# Convert comma separated _TARGET_NODES to a space separated list
        good_targets=$(IFS=, set -- $_TARGET_NODES ; echo $* )
    fi
    #
    :   CAA has not ruled out talking to node $good_targets
    #
    if [[ -n $_SPOC_FORCE ]] && /usr/lib/cluster/incluster
    then
	#
	:   It is possible that the target node list contains names
	:   that do not correspond to CAA host names after the CAA
	:   cluster is created.  
	#   Before the CAA cluster is created, all target nodes are
	#   naturally not in a CAA cluster.  Ordinarily, this can be
	#   left to clhaver to find, though it does not distinguish
	#   between nodes it cannot connect to, and nodes that are
	#   that are not in the CAA cluster.  If the force flag was
	#   specified, and we are already in a CAA cluster, 
	:   Silently elide names in the target list that do not 
	:   correspond to CAA host names.
	#
	save_targets=$good_targets
	good_targets=""
	for given_node in $save_targets
	do
	    if cl_query_hn_id -q -i $given_node >/dev/null 2>&1
	    then
		good_targets=${good_targets:+"${good_targets} "}${given_node}
	    else
		print "$(date) ${_CMD}._cspoc_verify[$LINENO]: Given target \"$given_node\" cannot be converted to a CAA host name.  It will be skipped." >> $clutilslog
	    fi
	done
    fi 
    if [[ -n $good_targets ]]
    then
	#
	:	CAA thinks that nodes \"$good_targets\"
	:	are active.  See if clcomd can talk to them, 
	:	and what level of PowerHA is present.
	#
	# clhaver emits "ha_node:caa_host:VRMF" per node; an empty caa_host
	# field means clcomd could not reach that node.  Same ksh
	# last-stage-in-current-shell reliance as the lscluster loop above.
	clhaver -c $_VER $good_targets | \
	while IFS=: read ha_node caa_host VRMF
	do
	    if [[ -z $caa_host ]]
	    then
		#
		:   Add $ha_node, which clhaver cannot communicate to 
		:   through clcomd, to the list of nodes not to try 
		:   to run the command on.
		#
		down_ha_nodes=${down_ha_nodes:+"${down_ha_nodes} "}${ha_node}
	    elif (( $VRMF < $_VER ))
	    then
		#
		:   Add $ha_node to the list of nodes below the minimum
		:   HA release level.
		#
		bad_level_nodes=${bad_level_nodes:+"${bad_level_nodes} "}${ha_node}
	    fi
	done
	if [[ -n $bad_level_nodes ]]
	then
	    #
	    :   Nodes \"$bad_level_nodes\" report that they are running a
	    :   version of PowerHA below the required level $_VERSION
	    #
	    if [[ -z $_SPOC_FORCE ]]
	    then
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will not be run because the following nodes are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes"
	    elif [[ $bad_level_nodes == $good_targets ]]
	    then
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will not be run because all nodes are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes"
	    else
		#
		:   If force was specified, command processing continues
		:   but skips nodes \"$bad_level_nodes\"
		#
		nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		"%s[%d]: The command will be run, but not on the following nodes, which are below the required level %s: %s\n" "$_CMD" $LINENO $_VERSION "$bad_level_nodes" 
	    fi
	    down_ha_nodes=${down_ha_nodes:+"${down_ha_nodes} "}${bad_level_nodes}
	    _RETCODE=1
	fi 
    fi
    if [[ -n $down_ha_nodes ]]
    then
	#
	:   The nodes in \$down_ha_nodes, \"$down_ha_nodes\", are not acceptable
	:   targets for this command, either because CAA says they are down,
	:   or clcomd cannot talk to them, or they are running too far a back
	:   level of PowerHA.  Remove them from the list of C-SPOC target nodes.
	#
	save_targets=""
	for ha_node in $good_targets
	do
	    if [[ $down_ha_nodes != @(?(* )${ha_node}?( *)) ]]
	    then
		save_targets=${save_targets:+"${save_targets} "}${ha_node}
	    fi
	done
	good_targets=$save_targets
	bad_targets=$(IFS=, set -- $down_ha_nodes ; print "$*" )
	if [[ -z $good_targets ]]
	then
	    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
	    "%s[%d]: The command will not be run because all of the target nodes, %s, are not reachable\n" "$_CMD" $LINENO "$bad_targets"
	    exit 1
	elif [[ -n $bad_targets ]]
	then
	    # Message choice below only distinguishes singular/plural
	    # (comma present means more than one unreachable node)
	    if [[ -z $_SPOC_FORCE ]]
	    then
		if [[ $bad_targets == @(*,*) ]]
		then
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will not be run because the target nodes, %s, are not reachable\n" "$_CMD" $LINENO "$bad_targets"
		else
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will not be run because the target node, %s, is not reachable\n" "$_CMD" $LINENO "$bad_targets"
		fi
		_RETCODE=1
	    else
		if [[ $bad_targets == @(*,*) ]]
		then
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will be run, but not on the unreachable nodes %s\n" "$_CMD" $LINENO "$bad_targets"
		else
		    nls_msg -2 -l ${cspoc_tmp_log} 4 9999 \
		    "%s[%d]: The command will be run, but not on the unreachable node %s\n" "$_CMD" $LINENO "$bad_targets"
		fi
	    fi
	fi
    fi
    _TARGET_NODES=$(IFS=, set -- $good_targets ; print "$*" )
    #
    :   \$_TARGET_NODES, \"$_TARGET_NODES\", is a list of nodes that are 
    :   up, contactable by clcomd, and running a reasonably up to date
    :   level of PowerHA.
    #
    return $_RETCODE
} # End of "_cspoc_verify()"
################################################################################
#
#   Start of main, Main, MAIN
#
################################################################################
if [[ -n $_DEBUG ]]
then
    # NOTE(review): this message is missing the closing "]"
    print "\n[C-SPOC Initialization Started version 1.16.7.9"
fi
# Minimum PowerHA level required on target nodes: _VER in integer VRMF
# form (used by clhaver), _VERSION in dotted form (used in messages).
# Callers/SMIT may preset both.
_VER=${_VER:-"6100"}
_VERSION=${_VERSION:-"6.1.0.0"}
export CLUSTER_OVERRIDE="yes"   # Allow CAAC commands to run...      710
# Basename of the invoking script; used as the prefix in all messages
_CMD=${0##*/}
# Accumulated status of remote command execution (updated by cel_f* stubs)
integer TRY_RC=0
#
: since root is needed to determine node lists and what not - clgetaddr
: we may as well disable everything right here right now.  By putting
: in an explicit check we can provide a more intuitive message rather
: than something about not being able to execute some command later on.
#
if [[ $(whoami) != "root" ]] && ! ckauth PowerHASM.admin
then
    nls_msg -2 -l ${cspoc_tmp_log} 4 52 \
    "%s: All C-SPOC commands require the user to either be root, or have PowerHASM.admin authorization\n" "$_CMD"
    exit 2
fi
#
: Set a default value, unless this script is called from SMIT, in which
: case _CSPOC_MODE will already be defined.  By default, this should determine
: what the request mode type.
#
export _CSPOC_MODE=${_CSPOC_MODE:-"both"}
#
: By default, assume that we are being called from the command line
#
export _CSPOC_CALLED_FROM_SMIT=${_CSPOC_CALLED_FROM_SMIT:-"false"}
#
: Make sure that the _CMD_ARGS variable is visible everywhere
#
export _CMD_ARGS=""
[[ -n $_DEBUG ]] && print "\n[Parsing Command Line Options ... ]"
#
:   Tell clencodearg to skip the special escape processing for '='
#
if [[ $SKIP_EQ_ESC == true ]]
then
    export _ENCODE_ARGS="-e"
else
    export _ENCODE_ARGS=""
fi
# Option descriptor strings; default to "-" (no options) when unset
_CSPOC_OPT_STR=${_CSPOC_OPT_STR:--}
_OPT_STR=${_OPT_STR:--}
_getopts "$_CSPOC_OPT_STR" "$_OPT_STR" "$@" || exit 1
if [[ -n $_DEBUG ]]
then
    print "_CMD_ARGS=${_CMD_ARGS}"
    print "_NUM_CMD_ARGS=${_NUM_CMD_ARGS}"
    print "_NON_FLG_ARGS=${_NON_FLG_ARGS}"
    print "\n[Getting Cluster Node List ... ]"
fi
#
:   Determine the nodes in the cluster, and the nodes to which this operation
:   aplies.
#
export ODMDIR=/etc/objrepos
_get_node_list || exit 1
_get_target_nodes || exit 1
if [[ -n $_DEBUG ]]
then
    print "_CLUSTER_NODES=${_CLUSTER_NODES}"
    print "\n[Verifying C-SPOC Requirements ... ]"
fi
# Default clutils.log location comes from the HACMPlogs ODM
if [[ -z $clutilslog ]]
then
   clutilslog=$(clodmget -q 'name = clutils.log' -f value -n HACMPlogs)"/clutils.log"
fi
#
:   If not all nodes are reachable, stop now, unless the "force" flag was
:   specified, implying continue despite unreachable nodes
#
_cspoc_verify || {
    [[ -z $_SPOC_FORCE ]] && exit 1
}
if [[ -n $_DEBUG ]]
then
    print "\n[C-SPOC Initialization Completed.]"
    print "DEBUG: Entering ${0##*/}"
    (( $_DEBUG >= 8 )) && set -x
fi
#
:   Include the lvm utilities for physical volumes
#
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2020,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r721 src/43haes/usr/sbin/cluster/cspoc/plans/lvm_utils.cel 1.61.1.8 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1998,2016 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/plans/lvm_utils.cel, 726, 2147A_aha726, Feb 05 2021 09:50 PM 
#
###############################################################################
#
# _get_physical_volumes
#
# Grab the physical volume names from the command line, if any were provided.
# Also keep a record of the physical id's of those volumes from the reference
# node.
#
# Variables used:
#
#    _CMD_ARGS
#
# Variables set:
#
#    _DNAMES    -  space separated list of physical disk names
#    _EDNAMES   -  space separated list of encoded physical disk names
#    _REFNODE   -  the reference node provided by the user with -R
#
###############################################################################
function _get_physical_volumes
{
    # Parses the (clencodearg-encoded) command line in _CMD_ARGS:
    # extracts the -R reference node into _REFNODE, and the trailing
    # disk names from _NON_FLG_ARGS into _EDNAMES (encoded) and
    # _DNAMES (decoded).  Mutates _CMD_ARGS in place.
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _get_physical_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_get_physical_volumes
	    set -x
	}
    }
    typeset DISKS
    typeset ENODE
    typeset PV
    _DNAMES=""
    ENODE=""
    _REFNODE=""
    #
    :	If the -R switch was provided on the command line to identify a
    :	reference node, pick up that node name
    #
    # ENODE is still in encoded form here; decoded below via cldecodearg
    ENODE=$(print -- $_CMD_ARGS | sed -n 's/.*\-R *\([^ ]*\).*/\1/p')
    [[ -n $ENODE ]] && {
	#
	:   Remove the -R switch, and its argument, from the command line
	:   and save away the reference node.
	#
	_CMD_ARGS=$(print -- $_CMD_ARGS | sed -e 's/\-R *[^ ]*//')
	_REFNODE=$(print -- $ENODE | cldecodearg)
    }
    #
    :	At this point, the expectation is that the command as entered ended in
    :	a list of hdisk names.  These have been collected by cl_init into
    :	_NON_FLG_ARGS so called because they are not preceeded by a flag such
    :	as "-d"
    #
    DISKS=${_NON_FLG_ARGS##+([ ])}          # trim leading blanks
        #
    :	If no disks were provided, a reference node is redundant
    #
    if [[ -z $DISKS ]]
    then
	[[ -n $_REFNODE ]] && {
	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 23 "${_CMD}: No disks provided.  Ignoring -R option.\n" ${_CMD} 
	    _REFNODE=""			    # avoid processing reference node later
	}
    else
        #
	:   If disk names were given, trim them off of the string of the
	:   complete set of arguments to this command.  This is so that when
	:   they have been resolved relative to the reference node, they can
	:   just be appended back onto the string of other arguments.
        #
	_CMD_ARGS=${_CMD_ARGS%% ${DISKS}}
        #
        :   Physical volumes were provided - a reference node is required.
        :   That is, since hdisk names are not guaranteed unique across the
        :   cluster, we have to know on which node are the names meaningful.
        #
	[[ -z $_REFNODE ]] && {
	    nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 22 "${_CMD}: The -R switch is required when providing physical volumes.\n" ${_CMD} 
	    exit 1
	}
        #
        :   Create a space separated list of these physical disk names, in
        :   both encoded and decoded form.
        #
	for PV in $DISKS
	do
	    _EDNAMES=${_EDNAMES:+"${_EDNAMES} "}"${PV}"
	    _DNAMES=${_DNAMES:+"${_DNAMES} "}"$(print -- $PV | cldecodearg)"
	done
    fi
}
###############################################################################
#
# _verify_physical_volumes
#
# Verifies that the physical disks provided on the command line are valid
# for the volume group being operated upon.
#
# Arguments:
#
#    _VG        -  the volume group
#    _CHECK_VG  -  true if we should verify physical volumes against those that
#                  belong to the volume group.
#		-  false if we should verify that the given physical volumes
#		   belong to no volume group
#    _CHECK_ALL -  true if we should determine if the user selected all disks
#                  belonging to the volume group.
#    _NODE	-  Node (typically the reference node) on which the disk names
#		   are valid
#
# Variables used:
#
#    _DNAMES    -  the list of physical disk names
#
# Variables set:
#
#    _PVID_LIST - the list of PVID's for the provided physical disk names.
#    _EDNAMES   -  the list of encoded physical disk names on the node on
#		    which the command will be run
#    _SELECTED_ALL  - set to "true" if _DNAMES contains all the disks in the 
#		      volume group, to "false" otherwise
#    _IMPORT_PVID   - PVID to use to pick up volume group changes across the
#		      cluster
#
###############################################################################
function _verify_physical_volumes
{
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _verify_physical_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_verify_physical_volumes
	    set -x
	}
    }
    #
    :	Check for proper input
    #
    (( $# < 3 )) && {
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 55 "${_CMD}: wrong number of arguments\n" ${_CMD} 
	exit 1
    }
    #
    :	Check to see if any work is required here
    #
    if [[ -z $_DNAMES && $_CSPOC_MODE == "concurrent" && -z $I_NODES ]] && 
       [[ -z $CA_FLAG ]] 
    then
	#
	:	If the list of disks provided by the user is empty, and this is
	:	a concurrent request, and the volume group is varyed on all nodes,
	:	we do not need to go any further, since we will not have to be doing
	:	any importing.  
	#
	return
    fi
    #
    :	For serial requests, or situations where a concurrent
    :	volume group is not online on all nodes, or where an explicit
    :	corrective action has been requested, an _IMPORT_PVID is still needed.
    #
    typeset _VG="$1"
    typeset _CHECK_VG="$2"
    typeset _CHECK_ALL="$3"
    typeset _NODE="$4"
    typeset _AVL_DISKS=""
    typeset _BAD_DISKS=""
    typeset _SVG
    typeset _CLNODE
    typeset PV
    typeset _D
    typeset _USE_REFNODE
    typeset _DISK=""
    typeset _disk_info=""
    #
    :	Check to see if the list of disks has to be resolved with respect to
    :	the reference node.  If no reference node is given, or if the
    :	reference node is the same as the node on which the command is going
    :	to be run, no such resolution is required.
    #
    if [[ -n $_REFNODE && $_REFNODE != $_NODE ]]
    then
	_USE_REFNODE="true"
    else
	_USE_REFNODE="false"
    fi
    if [[ $_CHECK_VG == "true" ]]
    then
	#
	:   Verify that the physical volumes belong to the volume group
	:   provided
	#
	_SVG=$_VG
    else
	#
	:   Verify that the physical volumes belong to no volume group
	#
	# lspv prints "None" in the VG column for unassigned disks, so
	# matching _SVG=None selects free disks below
	_SVG=None
    fi
    #
    :	If the given disk names in $_DNAMES has to be interpreted with respect
    :	to a reference node, find out what names are in use there.
    #
    if [[ $_USE_REFNODE == "true" ]]
    then
	if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
	then
	    print "DEBUG: Obtaining physical volumes from _REFNODE ($_REFNODE)"
	fi
	#
	:   Get the physical volume information from the reference node
	#
	# cel_f2 is a C-SPOC generated stub (see file head): it runs the
	# encoded command on $_REFNODE via cdsh, leaving per-node-prefixed
	# output in $try_out and updating the global TRY_RC on failure
	E_LSPV_CMD=$(print "LC_ALL=C lspv -L" | clencodearg -e)
cel_f2
	(( $TRY_RC != 0 )) && exit 1
	#
	:   Parse the output of the lspv command to find the disk names and
	:   PVIDs on that node
	#
	# Each $try_out line is: <node> <hdisk> <pvid> <vg> [rest]
	while read _out_node _out_disk _out_pvid _out_vg _out_rest ; do
	    if [[ $_out_vg == $_SVG && $_out_pvid != [Nn]one ]]
	    then
		#
		:   Add $_out_disk to the list of disks in that volume group
		:   on that node
		#
		_AVL_DISKS=${_AVL_DISKS:+"${_AVL_DISKS} "}$_out_disk
	    fi
	    if [[ $_DNAMES == @(?(* )$_out_disk?( *)) ]]
	    then
		#
		:   Add $_out_disk to the list of PVIDs for the disks provided by 
		:   the user.  This will be used for getting the physical volume 
		:   names on the node where the command will actually be run, if 
		:   it is different from the reference node.
		#
		_PVID_LIST=${_PVID_LIST:+"${_PVID_LIST} "}$_out_pvid
	    fi
	done < $try_out
	#
	:   Save a pointer to the lspv output on this node so that we can get
	:   to it later if we have to.
	#
	_disk_info=$try_out
    fi	
    #
    :	Now, get the list of physical volumes from the node on which we will
    :	run the command.
    #
    if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
    then
	print "DEBUG: Obtaining physical volumes on node $_NODE"
    fi
    TRY_RC=0
    # cel_f3 is the same generated lspv stub, run against $_NODE; it
    # overwrites $try_out with that node's output
    E_LSPV_CMD=$(print "LC_ALL=C lspv -L" | clencodearg -e)
cel_f3
    (( $TRY_RC != 0 )) && exit 1
    #
    :	If we did not have to use the reference node, then set the list of
    :	available volumes here, as well as the list of physical volume ids
    #
    if [[ $_USE_REFNODE == "false" ]]
    then
	#
	:   Parse the output of the lspv command to find the disk names and
	:   PVIDs on that node
	#
	while read _out_node _out_disk _out_pvid _out_vg _out_rest ; do
	    #
	    :	Create a list of the names of the disks in that volume group
	    :	on that node
	    #
	    if [[ $_out_vg == $_SVG && $_out_pvid != [Nn]one ]]
	    then
		_AVL_DISKS=${_AVL_DISKS:+"${_AVL_DISKS} "}$_out_disk
	    fi
	    #
	    :   Create a list of PVIDs for the disks provided by the user.  This
	    :   will be used for getting the physical volume names on the node
	    :   where the command will actually be run, if it is different from
	    :   the reference node.
	    #
	    if [[ $_DNAMES == @(?(* )$_out_disk?( *)) ]]
	    then
		_PVID_LIST=${_PVID_LIST:+"${_PVID_LIST} "}$_out_pvid
	    fi
	done < $try_out
	#
	:   If we are going to use the second set of lspv output, set the
	:   pointer to it, since the temp file will have a different name.
	#
	_disk_info="$try_out"
    fi
    #
    :	If we were not called from SMIT, verify that all the disks passed on
    :	the command line are valid.  SMIT only shows those disks that are
    :	valid, so the user can not provide any that are bad.
    #
    if [[ $_CSPOC_CALLED_FROM_SMIT == "false" ]]
    then
	# 
	:   Collect the names of the given disks that do not show up on the
	:   target node
	#
	for PV in $_DNAMES
	do
	    [[ $_AVL_DISKS != @(?(* )$PV?( *)) ]] && \
		_BAD_DISKS=${_BAD_DISKS:+"${_BAD_DISKS} "}${PV}
	done
	[[ -n $_BAD_DISKS ]] && {
	    if [[ $_SVG == "None" ]] ; then
		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 20 "${_CMD}: Physical volumes ($_BAD_DISKS) are invalid on node $_REFNODE\n" ${_CMD} $_BAD_DISKS $_REFNODE 
	    else
		nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 21 "${_CMD}: Physical volumes ($_BAD_DISKS) are not allocated to volume group $VG\n" ${_CMD} $_BAD_DISKS $VG 
	    fi
	    exit 1
	}
    fi
    # 
    :	Determine the physical volume id to use for importing changes on other
    :	nodes in the cluster.  We want a disk that will be in the volume group
    :	when the operation is complete, and, preferrably, one that was in the
    :	volume group before the operation, too.
    # 
    if [[ $_CHECK_ALL == "true" ]]
    then
	#
	:   Check to see if the user listed all physical volumes in the volume
	:   group
	#
	if [[ -n $_DEBUG ]] && (( $_DEBUG > 1 )) 
	then
	    print "DEBUG: Checking for all disks on command line"
	fi
	#
	:   Check to see if there is any disk in the volume group that was not
	:   in the given list of disks
	#
	for _D in $_AVL_DISKS
	do
	    [[ $_DNAMES != @(?(* )$_D?( *)) ]] && {
		_DISK="$_D"
		break
	    }
	done
	#
	:   The expected use of _CHECK_ALL == true is for operations like
	:   reducevg and unmirrorvg that remove disks from the volume group.
	:
	:    + if all disks have been selected, none can be used for
	:      importvg, and the _SELECTED_ALL flag will indicate this.
	:
	:    + if not all disks have been selected, _DISK will contain that
	:      was not selected, and can be used for importvg once the
	:      reducevg is done
	#
	if [[ -z $_DISK ]]
	then
	    _SELECTED_ALL="true"
	else
	    _SELECTED_ALL="false"
            _IMPORT_PVID=$(grep -w $_DISK $_disk_info | read node disk pvid rest ; print $pvid)
	fi
    else
        #
        :   It was not expected that all disks in the volume group could be
        :   listed - this is not the reducevg case.  Pick an existing disk in
	:   the volume group.  
        #
	_IMPORT_PVID=$(grep -w $_VG $_disk_info | grep -v [Nn]one | read node disk pvid rest ; print $pvid)
	if [[ -z $_IMPORT_PVID && -n $_DNAMES ]]
	then
	    #
	    :	If we had not found any disks in the volume group - which could
	    :	happen on importing a new volume group - pick one of the given
	    :	disks.
	    #
            print $_DNAMES | read _DISK rest
            # Only accept a value that looks like a PVID (hex digits)
            _IMPORT_PVID=$(grep -w $_DISK $_disk_info | \
                           read node disk pvid rest
                           if [[ $pvid == +([[:xdigit:]]) ]]
                           then
                               print $pvid
                           fi)
        fi
    fi
    #
    :	If no disks were provided, we do not need to go any further
    #
    [[ -z $_DNAMES ]] && 
	return
    rm -f $_disk_info
    #
    :	If we have a PVID_LIST and a reference node, we need to translate 
    :	the physical ids into physical names on the reference node.
    #
    if [[ -n $_PVID_LIST && $_USE_REFNODE == "true" ]] 
    then
	[[ -n $_DEBUG ]] && print "DEBUG: Translating PVID_LIST"
	_EDNAMES=""
        for _P in $_PVID_LIST
        do
	    _DISK=$(grep -w $_P $try_out | read node disk rest ; print $disk)
	    if [[ -n $_DISK ]]
	    then
		#
		:   Here build the encoded list of disk names for the command
		#
		_EDNAMES=${_EDNAMES:+"${_EDNAMES} "}$(print -- $_DISK | clencodearg)
	    fi
        done
    fi
}
###############################################################################
#
# _verify_replicated_volumes
#
# Verifies that the volume group, being operated on, is in a Replicated Resource Group.
#
# Arguments:
#
#    _VG            -  the volume group
#    _CLNODES       -  The nodes where the volume groups would be imported
#    _ACTIVE_NODE   -  the reference node where the VG is varied on
#
###############################################################################
function _verify_replicated_volumes
{
    if [[ -n "$_DEBUG" ]] 
    then
	print "DEBUG: Entering _verify_replicated_volumes version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    # Fixed typo: was "_verify_replicaed_volumes"
	    typeset PROGNAME=_verify_replicated_volumes
	    set -x
	}
    fi
    (( $# != 3 )) && {
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 55 "${_CMD}: wrong number of arguments\n" ${_CMD} 
	exit 1
    }
    typeset _VG="$1"
    typeset _ACTIVE_NODE="$2"
    typeset _CLNODES="$3"
    typeset _REP_VOL
    typeset _PPRC_REPRESOURCE
    typeset _ERCMF_REPRESOURCE
    typeset _SVCPPRC_REPRESOURCE
    typeset _SR_REPRESOURCE
    # Localize and initialize: a stale res_type/verify_cmd left over from a
    # prior call in this shell would otherwise trigger the wrong branch below
    typeset res_type=""
    typeset verify_cmd=""
    #
    :	Verify that this Volume Group contains Replicated Volumes.
    :	This is to verify that the changes made at one site will be propagated
    :	to the remote DASD. CSPOC operations will not be allowed if the changes 
    :	will not be known at the remote site.
    #
    # Replicated Volume Types:
    #       IBM PPRC 
    #       IBM GeoMirror
    #       IBM eRCMF
    #       IBM SVC PPRC
    #       EMC SRDF®
    #
    # IBM PPRC  Replicated Volumes 
    # 
    export ODMDIR=/etc/objrepos
    _REP_VOL=$(/usr/es/sbin/cluster/utilities/clodmget -q value="$_VG" -f group -n HACMPresource)
    if [[ -z $_REP_VOL ]]
    then
	#
	:   A volume group that is not in a resource group is not a replicated resource
	#
	return
    fi
    _PPRC_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f PPRC_REP_RESOURCE -n HACMPresource)
    _ERCMF_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f ERCMF_REP_RESOURCE -n HACMPresource)
    _SVCPPRC_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f SVCPPRC_REP_RESOURCE -n HACMPresource)
    _SR_REPRESOURCE=$(/usr/es/sbin/cluster/utilities/clodmget -q group="$_REP_VOL" -f SR_REP_RESOURCE -n HACMPresource)
    if [[ -z "$_PPRC_REPRESOURCE" && -z "$_ERCMF_REPRESOURCE" && -z "$_SVCPPRC_REPRESOURCE" && -z "$_SR_REPRESOURCE" ]]
    then
       #
       : This VG is not a PPRC Replicated Resource of any of the supported types
       #
       return 0
    fi
    #
    :	Verify that the cluster is active on the node with 
    :	the Volume Group varied on $_ACTIVE_NODE.
    #
    # E_ACTIVE_NODE is intentionally global: the generated cel_f4 stub
    # references it when running clgetactivenodes remotely
    E_ACTIVE_NODE=$(print $_ACTIVE_NODE | clencodearg)
cel_f4
    if (( $cel_rc >= 1 )) 
    then
	#
	:   Cluster is active on node with $_VG varied on. Allow CSPOC operations.
	:   Lazy update will enable the changes to be made at the remote
	:   site after failover
	#
	return 0
    fi
    #
    :	The cluster is not active on the node with $_VG varied on.   Further
    :	processing depends on the resource type
    #
    if [[ -n "$_PPRC_REPRESOURCE" ]]
    then
	#
	: This is a PPRC Replicated Resource
	: Verify that the cluster is active on the node with 
	: the Volume Group varied on $_ACTIVE_NODE
	#
	res_type="PPRC"
	verify_cmd=/usr/es/sbin/cluster/pprc/utils/cl_verify_pprc_cspoc
    fi   
    if [[ -n "$_SVCPPRC_REPRESOURCE" ]]
    then
	#
	:   This is an SVC PPRC Replicated Resource
	:   Verify that the cluster is active on the node with 
	:   the Volume Group varied on $_ACTIVE_NODE
	#
	res_type="SVC PPRC"
	verify_cmd=/usr/es/sbin/cluster/svcpprc/utils/cl_verify_svcpprc_cspoc
    fi   
    if [[ -n "$_SR_REPRESOURCE" ]]
    then
	#
	:   This is an EMC SRDF® Replicated Resource
	#
	res_type="EMC SRDF®"
	verify_cmd=/usr/es/sbin/cluster/sr/utils/cl_verify_sr_cspoc
    fi   
    # NOTE(review): _ERCMF_REPRESOURCE is queried above but has no
    # res_type/verify_cmd case here, so eRCMF resources fall through with no
    # pair-state check - confirm this is intentional
    if [[ -n $res_type ]]
    then
	#
	:   The Cluster is not active on the node with vg varied on. If the pprc
	:   pair is not in a full-duplex state, changes made on this node may not
	:   be known at the remote ODM. Verify that the CSPOC operations will be 
	:   run on nodes that are on the same site. CSPOC operations should 
	:   succeed in this case.
	#
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 60 "WARNING: $_VG is a $res_type Replicated Resource.\n"  $_VG "$res_type"
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 61 "	 Since the cluster is NOT active on node $_ACTIVE_NODE with $_VG active,\n" $_ACTIVE_NODE $_VG
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 62 "	 the CSPOC operation may not succeed on the remote peers.\n"
	nls_msg -2 -l $cspoc_tmp_log ${_LVM_MSET} 63 "Verifying $res_type pair state ...\n" "$res_type"
	#
	:   Convert the comma separated list of nodes in _CLNODES 
	:   into space separated list SP_CLNODES
	#
	SP_CLNODES=$(IFS=, set -- $_CLNODES ; print $* )
	if ! $verify_cmd $_VG $_ACTIVE_NODE $SP_CLNODES
	then
	    nls_msg -2 -l $cspoc_tmp_log ${_MSET} 9999 "The state of the $res_type pair does not allow the CSPOC operation at this time.\n" $res_type
	    exit 1
	else
	    return 0
	fi 
    fi
}
###############################################################################
#
#
#   Name:	_lv_status
#
#
#   Input:	1. flag for clgetvg - either "-l" or "-f"
#		2. corresponding value, either logical volume or file system,
#		   in encoded form
#
#		Variables used by this function
#
#		_SPOC_FORCE - force flag set
#		_TARGET_NODES - list of nodes from command line on which this
#		    is to be made
#
#
#   Function:	Call clgetvg on each node on which the logical volume
#		operations is going to perform until one reports back the name
#		of the owning volume group.   That gets passed through to
#		_vg_status, which indicates the state of the volume group on
#		each node.
#
#		Note that it is assumed that the logical volume is known on at
#		least one of the nodes in _TARGET_NODES; it is an error for
#		this routine to be invoked with a completely unknown logical
#		volume.
#
#
#   Output:	Variables set by this function
#
#		VG  - encoded name of the owning volume group
#		DVG - decoded (readable) name of the owning volume group
#
#		Note that these have to have been defined by the caller in
#		order for the caller to pick up these values.
#
#
#   Returns:    Normally returns to caller with output variables set
#               On error, will exit with a message; does not return to caller
#
#
################################################################################
function _lv_status
{
    # Debug preamble: announce entry; at debug level 8 and above, turn on
    # execution tracing for this function
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _lv_status for $1 $(print $2 | cldecodearg) version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_lv_status
	    set -x
	}
    }
    integer TRY_RC=1		    # preset to failure until clgetvg succeeds
    option=$1			    # either '-l' or '-f'
    parameter=$2		    # either logical volume or file system name
    #
    :	Since its only necessary to find the owning volume group once - LV
    :	names assumed to be unique across the cluster - check to see if the
    :	local node is one of the ones that should know about it.  Local tests
    :	are faster
    #
    LOCAL_NODE=$(get_local_nodename)
    # The extended pattern matches LOCAL_NODE as a complete element of the
    # comma separated _TARGET_NODES list (start/end or comma delimited)
    if [[ $_TARGET_NODES == @(?(*,)$LOCAL_NODE?(,*)) ]] 
    then
	uu_parm=$(print $parameter | cldecodearg)
	DVG=$(clgetvg $option $uu_parm 2>/dev/null)	# suppress any 'not found' msg
	TRY_RC=$?
    fi
    #
    :	If not successfully found locally, look across the rest of the cluster
    #
    if (( $TRY_RC != 0 )) || [[ -z $DVG ]]
    then
	#
	:   Find which VG contains the LV, asking each of the nodes in turn, if
	:   necessary
	#
	# cel_f5 is a C-SPOC generated stanza; presumably it runs clgetvg
	# across the target nodes, leaving output in $try_out and status in
	# TRY_RC -- confirm against the generated prologue at the top of file
cel_f5
	read A DVG < $try_out		    # decoded (readable) volume group name
	rm -f $try_out			    # otherwise next call just appends
    fi
    (( $TRY_RC != 0 )) &&		    # No node knows of this logical volume
	exit 1
    VG="$(print $DVG | clencodearg)"	    # encoded volume group name
    #
    :       Determine the activation status of the volume group across the
    :       cluster.  This tells us where to run the command.
    #
    _vg_status
}					    # end _lv_status
###############################################################################
#
#
#   Name:	_vg_status
#
#
#   Input:	Variables used by this function
#
#		VG - volume group name, encoded
#		DVG - volume group name, decoded
#		_SPOC_FORCE - force flag set
#		_TARGET_NODES - list of nodes from command line on which this
#		    is to be made
#
#
#   Function:	Call clresactive on each node on which the volume group
#		operation is going to be performed.  This will pass back
#		status from lsvg.  Provide in CL_NODE a choice for the node to
#		run a command against this volume group.
#
#
#               "-u"    A volume group not known on any node is not an error
#
#
#   Output:	Variables set by this function
#
#		CL_NODE - node on which to run operation against this volume
#			  group
#		_CSPOC_MODE - if set to "evaluate", and the volume group mode
#			  can be determined from the current activation
#			  state or definitions, set to "concurrent" or "shared"
#               VG_ACTIVE - flag indicating type of activation done on CL_NODE
#                           "S" - was already active 
#                           "I" - was originally inactive
#                           "P" - was originally in passive mode
#                           "C" - was selected from the concurrent list
#
#		The following space separated lists:
#
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode (varyonvg -c)
#			  If VG_ACTIVE == C, then CL_NODE is also in this list
#		S_NODES - nodes on which the volume group is vary'd on in serial
#			  mode (varyonvg)
#			  If VG_ACTIVE == S, then CL_NODE is also in this list
#		I_NODES - nodes on which the volume group is inactive (varyoffvg)
#			  If VG_ACTIVE == I, then CL_NODE was removed from this
#			  list
#		P_NODES - nodes on which the volume group is vary'd on in passive
#			  mode (varyonvg -c -P)
#			  If VG_ACTIVE == P, then CL_NODE was removed from this
#			  list
#		O_NODES - nodes on which the volume group is unknown
#		G_NODES - nodes on which the volume group is known
#
#
#   Returns:    Normally returns to caller with output variables set
#               On error, will exit with a message; does not return to caller
#
#
################################################################################
function _vg_status
{
    # Debug preamble: announce entry; at debug level 8 and above, turn on
    # execution tracing for this function
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_status version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_vg_status
	    set -x
	}
    }
    #
    :   Pick up any passed options
    #
    u_flag=""				# "-u": unknown volume group is not an error
    while getopts ":u" option ; do
        case $option in
            u )
                u_flag="true"
                ;;
            * )
		shift $((OPTIND - 1))
                dspmsg scripts.cat 6555 "Option \"$option\" is not valid\n" "$option"
                return 1
                ;;
        esac
    done
    # 
    :	Check all the nodes relevant to this operation, to see what the current
    :	state of the volume group is on those nodes.
    # 
    integer TRY_RC=0
    # cel_f6 is a C-SPOC generated stanza; presumably it runs clresactive on
    # each target node, leaving per-node status in $try_out and a return code
    # in TRY_RC -- confirm against the generated prologue at the top of file
cel_f6
    (( $TRY_RC != 0 )) && 
	exit 1
    #
    :	Collect that state into local variables from the file $try_out, where
    :	it was collected from running clresactive on each node.  The format of
    :	the file is:
    :	"node_name: <status>"
    #
    C_NODES=""                          # clean out leftover values
    S_NODES=""
    I_NODES=""
    P_NODES=""
    O_NODES=""
    G_NODES=""
    while read node status rest ; do	# parse the line of $try_out
	case $status in			# note the status
	    concurrent )
		    type=C
		;;
	    active )
		    type=S
		;;
	    inactive )
		    type=I
		;;
	    passive )
		    type=P
		;;
	    no | * )
		    type=O
		;;
	esac
	#
	:   Add the node name minus the trailing ':' to the appropriate
	:   list:
	:	C_NODES - varyed on in concurrent mode - varyonvg -c
	:		  note that active mode 'varyonvg -c -A' also shows up
	:		  as 'concurrent'
	:	S_NODES - varyed on in normal mode - varyonvg
	:	I_NODES - not varyed on at all - varyoffvg
	:	P_NODES - varyed on in passive mode - varyonvg -c -P
	:	O_NODES - not known on that node - exportvg
	#
	eval ${type}_NODES=\${${type}_NODES:+\$${type}_NODES" "}${node%:}
	if [[ $type != O ]]		# status is not 'unknown'
	then
	    #
	    :	Additionally, keep a list of nodes on which the volume group
	    :	is at least defined, independent of its current state.
	    #
	    G_NODES=${G_NODES:+$G_NODES" "}${node%:}
	fi
    done < $try_out			# line at a time into the read statement
    rm -f $try_out				# otherwise next call just appends
    [[ -n $_DEBUG ]] && (( $_DEBUG >  4 )) && {
        print "DEBUG: Status of the volume group $DVG across nodes $_TARGET_NODES"
        print "DEBUG:   Concurrent = $C_NODES"
        print "DEBUG:   Active = $S_NODES"
        print "DEBUG:   Inactive = $I_NODES"
        print "DEBUG:   Passive = $P_NODES"
        print "DEBUG:   volume group is unknown = $O_NODES"
        print "DEBUG:   volume group is known = $G_NODES"
    }
    #
    :	Some C-SPOC commands work on both concurrent and shared volume groups.
    :	The intent is either flagged through SMIT, or must be determined
    :	dynamically.  If a dynamic determination has not yet been made, see if
    :	we can do so now, based on the known activation status.
    #
    if [[ -z $_CSPOC_MODE || $_CSPOC_MODE == "evaluate" ]] 
    then
	if [[ -n $C_NODES ]]
	then
	    #
	    :   Varyed on in concurrent mode
	    #
	    _CSPOC_MODE="concurrent"
	elif (( 1 < $(print $S_NODES | wc -w) ))
	then
	    #
	    :	Implicitly on in RAID concurrent mode on more than one node
	    #
	    _CSPOC_MODE="concurrent"
	elif [[ -n $S_NODES ]]
	then
	    #
	    :	Ordinary vary on at most one node
	    #
	    _CSPOC_MODE="shared"
	elif [[ -n $P_NODES ]]
	then
	    #
	    :	Passive vary on implies a shared resource
	    #
	    _CSPOC_MODE="shared"
	#
	:   We could not determine the mode from the activation state.  This
	:   would be the case when the volume group was varyed off cluster
	:   wide.  So, check the local ODM to see how its used.  The
	:   correctness of this operation depends on there being no
	:   unsynchronized changes across the cluster.
	#
	elif [[ -n $(odmget "-q name = CONCURRENT_VOLUME_GROUP and value = $DVG" HACMPresource) ]]
	then
	    #
	    :	Used as a concurrent volume group
	    #
	    _CSPOC_MODE="concurrent"
	elif [[ -n $(odmget "-q name = VOLUME_GROUP and value = $DVG" HACMPresource) ]]
	then
	    #
	    :	Used as a shared volume group
	    #
	    _CSPOC_MODE="shared"
	else
	    #
	    :   The volume group is not varied on anywhere, and not in a
	    :   resource group.  Assume shared, since that will work once
	    :   the volume group is varied on.
	    #
	    _CSPOC_MODE="shared"
	fi
    fi					    # end set _CSPOC_MODE
    #
    :	Correction for fast disk takeover
    #
    # NOTE: count the nodes with 'wc -w' -- the former 'print | wc -l' was
    # always 1, because print emits the whole list on a single line (even an
    # empty list), which made this test degenerate.  The comment below says
    # "at most one node", hence '<= 1'; compare the 'wc -w' usage above.
    if [[ $_CSPOC_MODE == "concurrent" ]] && 
       (( $(print $C_NODES | wc -w) <= 1 )) &&
       [[ -n $(odmget "-q name = VOLUME_GROUP and value = $DVG" HACMPresource) ]]
    then
	#
	:   An enhanced concurrent volume group used in active/passive
	:   mode for fast disk takeover will show up as being in
	:   concurrent mode on at most one node, but will be listed as
	:   a shared VOLUME_GROUP in HACMPresources
	#
	_CSPOC_MODE="shared"
    fi
    #
    :	Having found the status of the volume group across the cluster, pick
    :	a node that would be most appropriate to run the LVM or file system
    :	command of interest on.  Preferentially pick the local node if
    :	possible, otherwise just pick the first available.
    :
    :	At the end of this processing:
    :	    CL_NODE has the name of the node to use
    :	    VG_ACTIVE has an indication of the volume group current state
    #
    LOCAL_NODE=$(get_local_nodename)	    #	find out the local node name
    CL_NODE=""                              #   clean out any left over value
    if [[ -n $C_NODES ]] ; then		    #	in concurrent mode on some nodes
	#
	:   If the volume group is already varyed on in concurrent mode, pick
	:   a node from that list - preferentially, the local node - on which
	:   to run the command.
	#
	if [[ $C_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
	    CL_NODE=$LOCAL_NODE		    #	Use the local node
	else				    #	Local node is not a possibility 
	    echo $C_NODES | read CL_NODE rest #	So just use the first
	fi
	VG_ACTIVE="C"			    #	Picked from concurrent list
    elif [[ -n $S_NODES ]] ; then	    #	In shared mode on some nodes
	#
	:   The volume group can be active - ordinary varyonvg - on one or more
	:   nodes.  One node is the shared volume group case, multiple nodes
	:   would be expected in RAID concurrent mode.  Pick a node from that
	:   list - preferentially, the local node - on which to run the
	:   command.
	:   
	:   Note- it is up to the caller to decide if the operation should be
	:   allowed to proceed if the volume group is used in RAID concurrent
	:   mode on more than one node.  List operations will work, change,
	:   delete or create will not.
	#
	if [[ $S_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
	    CL_NODE=$LOCAL_NODE		    #	Use the local node
	else				    #	Local node is not a possibility 
	    echo $S_NODES | read CL_NODE rest #	So just use the first
	fi
	VG_ACTIVE="S"			    #	Used an active node
    else				    #	Not active anywhere
	#
	:   Since the volume group is currently varyed off, pick a node from
	:   the 'passive' or 'inactive' lists to vary it on.  Preferentially
	:   pick the local node.  The selected node is removed from the list,
	:   so that they remain accurate.
	#
	if [[ -n $P_NODES ]] ; then	    #	Look for passive nodes
	    if [[ $P_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
		CL_NODE=$LOCAL_NODE	    #	Use the local node
		P_NODES=$(echo $P_NODES | tr ' ' '\n' | grep -vw $LOCAL_NODE)
	    else			    #	Local node is not a possibility
		echo $P_NODES | read CL_NODE P_NODES  #	So just use the first
	    fi
	    VG_ACTIVE="P"		    #	Picked from passive list
	elif [[ -n $I_NODES ]] ; then	    #	Look for inactive nodes
	    if [[ $I_NODES == @(?(* )$LOCAL_NODE?( *)) ]] ;then
		CL_NODE=$LOCAL_NODE	    #	Use the local node
		I_NODES=$(echo $I_NODES | tr ' ' '\n' | grep -vw $LOCAL_NODE)
	    else			    #	Local node is not a possibility
		echo $I_NODES | read CL_NODE I_NODES  #	So just use the first
	    fi
	    VG_ACTIVE="I"		    #	Picked from inactive list
	else
            #
            :   For some operations, like importvg, its valid to have a volume
            :   group that is not currently known on any node.  In this case,
            :   the reference node must be valid.
            #
            if [[ $u_flag == "true" ]] ; then    #   unknown volume groups allowed
                VG_ACTIVE="O"                   #   And this is one of them 
                CL_NODE=$_REFNODE               #   the disks should be known here
            else
		#
		:   If for some reason it was not possible to find a node on which
		:   the volume group is or could be brought on line, the operation
		:   stops here.
		#
		nls_msg -2 -l $cspoc_tmp_log 24 6 "no node has access to $DVG\n" $DVG 
		exit 1
	    fi
	fi
    fi					    #	end by volume group state
}					    #	end _vg_status
################################################################################
#
#
#   Name:	_vg_active
#
#   Input:	Variables used by this function
#
#		VG - volume group name, encoded
#		DVG - volume group name, decoded
#		_SPOC_FORCE - force flag set
#		S_NODES - nodes on which the volume group is vary'd on in serial
#			  mode (varyonvg)
#		I_NODES - nodes on which the volume group is inactive (varyoffvg)
#		P_NODES - nodes on which the volume group is vary'd on in passive
#			  mode (varyonvg -c -P)
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode
#		CL_NODE - node on which the volume group is active or to be
#			  activated
#		VG_ACTIVE - flag indicating type of activation done on CL_NODE
#			    "S" - was already active 
#			    "I" - was originally inactive
#			    "P" - was originally in passive mode
#			    "C" - was selected from the concurrent list
#		_DNAMES - list of physical disk names provided by
#			  _get_physical_volumes for those commands that use them
#
#		"-r"	- Volume group will be used for read/only operations
#			  only (e.g., display) and an _IMPORT_PVID is
#			  unnecessary
#		"-p"	- volume group need only be in passive mode for desired
#			  use
#               "-R"    - Returns 1 to the caller on failure to activate vg
#                         rather exiting. In such case CL_NODE is added to
#                         I_NODES and CL_NODE is emptied.
#
#
#   Function:	Ensure that the volume group is active on one of the nodes
#		with the intent of being able to run a command there; 
#		activate it if it is not currently active and force was
#		specified.  The volume group is activated on CL_NODE, as set
#		by _vg_status.
#
#
#   Output:	Variables set by this function
#
#		EPVID - encoded PVID which can be used for importvg
#
#
#   Returns:	Normally returns to caller with output variables set
#		On error, will exit with a message; does not return to caller
#
#
################################################################################
function _vg_active
{
    # Debug preamble: announce entry; at debug level 8 and above, turn on
    # execution tracing for this function
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_active version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    set -x
	    typeset PROGNAME=_vg_active
	}
    }
    #
    :   Pick up any passed options
    #
    r_flag=""				# "-r": read-only use, no _IMPORT_PVID needed
    passive_only_flag=""		# "-p": passive mode varyon is sufficient
    typeset R_flag=""			# "-R": return 1 on varyon failure, do not exit
    while getopts ":rRp" option ; do
        case $option in
            r )
                r_flag="true"
                ;;
            R )
                R_flag="true"
                ;;
	    p )
		passive_only_flag="true"
		;;
            * )
                shift $((OPTIND - 1))
                dspmsg scripts.cat 6555 "Option \"$option\" is not valid\n" "$option"
                return 1
                ;;
        esac
    done
    if [[ -z $CL_NODE ]] ; then
	#
	:   If for some reason it was not possible to find a node on which the
	:   volume group is or could be brought on line, the operation stops
	:   here.  CL_NODE was set to $CL_NODE in _vg_status
	#
	nls_msg -2 -l $cspoc_tmp_log 24 6 "no node has access to $DVG\n" $DVG 
        [[ -z $R_flag ]] && exit 1
	return 1
    fi
    # Vary on the volume group on CL_NODE when it is inactive, or when it is
    # only passively varyed on and full (non-passive) access is required
    if [[ $VG_ACTIVE == I || ( $VG_ACTIVE == P && -z $passive_only_flag ) ]] ; then
	#
	:   If the volume group needs to be brought on line, do so on the
	:   selected node.  clvaryonvg will do the appropriate kind of varyon.
	#
	TRY_RC=0
	# cel_f7 is a C-SPOC generated stanza; presumably it runs clvaryonvg
	# on $CL_NODE, leaving its status in TRY_RC -- confirm against the
	# generated prologue at the top of the file
cel_f7
        (( $TRY_RC != 0 )) && {
            [[ -z $R_flag ]] && exit 1
            I_NODES="$CL_NODE $I_NODES"
            CL_NODE=""
            return 1
        }                                   #   varyon failed
    # NOTE(review): this elif can never be reached -- a VG_ACTIVE of "I" is
    # always consumed by the first branch above, regardless of
    # passive_only_flag.  The first test was likely meant to exclude the
    # 'I && passive_only' case so that cl_pvo (cel_f8) handles it; confirm
    # the intended behavior before changing either condition.
    elif [[ $VG_ACTIVE == I && $passive_only_flag == "true" ]]
    then
	#
	:   If the volume group needs be brought online in passive mode
	:   only, invoke cl_pvo to do so
	#
	TRY_RC=0
	# cel_f8 is a C-SPOC generated stanza; presumably it runs cl_pvo on
	# $CL_NODE, leaving its status in TRY_RC
cel_f8
        (( $TRY_RC != 0 )) && {
            [[ -z $R_flag ]] && exit 1
            I_NODES="$CL_NODE $I_NODES"
            CL_NODE=""
            return 1
	}
    fi					    #	end varyon
    [[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && 
	print "DEBUG: CL_NODE = $CL_NODE"
    #
    :	If there are inactive nodes whose information will have to be updated,
    :	get a useful PVID.
    #
    if [[ -z $EPVID && -z $r_flag && -z $passive_only_flag && ( -z $_REMOVED_VG || $_REMOVED_VG == "true" ) ]] 
    then
	#
	:   Check the disks, and find a PVID we can use for importvg later, if
	:   it has not been done in a prior check.
	#
	if [[ -z $_IMPORT_PVID ]]
	then
	    _verify_physical_volumes $DVG false false $CL_NODE
	    [[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && {
		print "DEBUG: _IMPORT_PVID = $_IMPORT_PVID"
	    }
	fi
	EPVID=$(echo $_IMPORT_PVID | clencodearg)
    fi
}					    #	end _vg_active
##############################################################################
#
#
#   Name:       _vg_sync
#
#   Input:	function request
#		    sync - synchronize
#		    release - restore volume group to original state
#		If no function request is passed, both functions are performed
#
#		chvg command
#		    lists a chvg command to be run on all nodes which must do
#		    an importvg -L, for those functions not picked up by
#		    importvg -L
#		
#		    This is valid only if the 'sync' function request is
#		    specified
#
#               Variables used by this function
#
#               VG - volume group name, encoded
#               DVG - volume group name, decoded
#               CL_NODE - node on which the volume group is active
#               I_NODES - nodes on which the volume group is inactive
#			  (varyoffvg)
#               P_NODES - nodes on which the volume group is vary'd on in
#			  passive mode (varyonvg -c -P)
#		C_NODES - nodes on which the volume group is vary'd on in
#			  concurrent mode
#               VG_ACTIVE - flag indicating type of activation done on CL_NODE
#                           "S" - was already active 
#                           "I" - was originally inactive
#                           "P" - was originally in passive mode
#			    "C" - was in concurrent mode
#		EPVID - encoded PVID of a disk in the volume group to use for 
#			importvg -L
#		_IMPORT_PVID - decoded PVID of a disk in the volume group to
#			use for importvg -L
#		_REMOVED_VG_ - operation resulted in the deletion of the
#			       volume group
#
#
#   Function:   Synchronize the updated volume group information across the 
#               cluster.   This ensures that the ODM information on each node
#               on which the volume group is defined actually matches what's
#               out on the disks, as modified by the operation.
#
#
#   Output:     None
#
#
#   Returns:    Normally returns to caller 
#               On error, will exit with a message; does not return to caller
#
#
##############################################################################
function _vg_sync
{
    # Debug preamble: announce entry; at debug level 8 and above, turn on
    # execution tracing for this function
    [[ -n $_DEBUG ]] && {
	print "DEBUG: Entering _vg_sync version 1.61.1.8"
	(( $_DEBUG >= 8 )) && {
	    typeset PROGNAME=_vg_sync
	    set -x
	}
    }
        request=$1			    #	function requested by caller
    chvg_cmd=$2			    #	Any chvg command that has to be run
    integer TRY_RC=0		    #	return code from CSPOC operations
    integer SAVE_RC=0		    #	error did not stop operations
    _REMOVED_VG_=${_REMOVED_VG_:="false"}   #	operation deleted volume group
    if [[ -z $request || $request == "sync" ]] &&
       [[ $_REMOVED_VG_ == "false" ]] ; then
	if [[ -n $I_NODES && -n $EPVID ]] ; then
	    #
	    :   There are nodes on which volume group $DVG was inactive, and for
	    :   which the local ODM must be updated to match the volume group.  This
	    :   processing is skipped for nodes which have the volume group varyed on
	    :   in passive mode, since LVM does the updates automatically.
	    #
	    if [[ $VG_ACTIVE == S ]] ; then
		#
		:   The volume group $DVG was brought on line in shared mode -
		:   ordinary varyonvg - remove the reserve so that the other
		:   nodes can read the VGDA and VGSA information from the disks.
		#
		# cel_f9 is a C-SPOC generated stanza; presumably it releases
		# the reserve on $CL_NODE, leaving its status in TRY_RC
cel_f9
		(( $TRY_RC != 0 )) && 
		    exit 1
            elif [[ $VG_ACTIVE == I ]] ; then
		# 
		:   The volume group $DVG was originally - before start of this
		:   C-SPOC plan - inactive, so vary it off again.  This will remove
		:   the reserve so that the other nodes can read the VGDA and
		:   VGSA information from the disks.
		# 
		# cel_f10 is a C-SPOC generated stanza; presumably it runs
		# varyoffvg on $CL_NODE, leaving its status in TRY_RC
cel_f10
                (( $TRY_RC != 0 )) &&
                    exit 1
                #
                :   Get the time stamps in sync
                #
                cl_update_vg_odm_ts -o $DVG "$G_NODES"
                                    VG_ACTIVE=""
	    fi
	    #
	    :	Have each of the inactive nodes run "importvg -L" followed by
	    :	any needed chvg cmmand to update the local ODM, and update the
	    :	local HACMP timestamps for this volume group.
	    #
	    update_cmd="clupdatevg $DVG $_IMPORT_PVID"
	    #---------------------------------------------------
            :   - if remote script debugging is desired - VERBOSE_LOGGING_REMOTE=high
            :   - set up request so the output of the script at the remote node is
            :     saved in /var/hacmp/log/cspoc.log.remote
            #---------------------------------------------------
            if [[ $VERBOSE_LOGGING_REMOTE == "high" ]]
            then
                update_debug_env="VERBOSE_LOGGING=high"
                update_debug="2>&1 | tee >> /var/hacmp/log/cspoc.log.remote"
                update_cmd="$update_debug_env $update_cmd $update_debug"
            fi
	    if [[ -n $chvg_cmd ]] ; then
		update_cmd="$update_cmd && $chvg_cmd"
	    fi
	    e_update_cmd=$(echo $update_cmd | clencodearg -e)
	    NODE_LIST=$(IFS=, set -- $I_NODES ; print "$*")
	    # cel_f11 is a C-SPOC generated stanza; presumably it runs
	    # $e_update_cmd on each node in NODE_LIST, accumulating failures
	    # in TRY_RC
cel_f11
	    #
	    :	Even if some nodes failed on the "importvg -L", we still need to
	    :	clean up below, so save the error for later
	    #
	    (( $TRY_RC != 0 )) && 
		SAVE_RC=1
	fi
    fi
    if [[ -z $request || $request == "release" ]] &&
       [[ $_REMOVED_VG_ == "false" ]] ; then
	#
	:   Now that all nodes have updated state, put volume group back $DVG into
	:   into the state we found it in - the state was set by _vg_active -
	:   assuming, of course, that it was not entirely removed in the
	:   operation
	#
	TRY_RC=0
	case $VG_ACTIVE in
	I)  #
	    :	The volume group $DVG was originally inactive.  If it was 
	    :	varyed off up above in the synchronization path, nothing needs
	    :	be done here
	    #
	    if [[ $request == release ]]
	    then
		#
		:   The volume group $DVG was originally inactive.  So, vary it off
		#
		# cel_f12: generated stanza, presumably varyoffvg on $CL_NODE
cel_f12
		(( $TRY_RC != 0 )) && 
		    exit 1
                #
                :   Get the time stamps in sync
                #
                cl_update_vg_odm_ts -o $DVG "$G_NODES"
                    		if [[ -n $P_NODES || -n $C_NODES || -n $S_NODES ]]
		then
		    #
		    :   On a successful varyoff, set the fence height to allow read only
		    :   access if there are any other nodes that are using this volume 
		    :	group.  This should preserve the volume group from inadvertent
		    :   modification by this node.
		    #
		    # cel_f13: generated stanza, presumably cl_set_vg_fence_height
cel_f13
		fi
	    fi
	    ;;
	P)  #	
	    :	The volume group $DVG was originally varyed on in passive mode.  So,
	    :	return it to that mode on $CL_NODE
	    #
	    if [[ $passive_only_flag != "true" ]]
	    then
		# cel_f14: generated stanza, presumably returns the volume
		# group to passive mode on $CL_NODE
cel_f14
		(( $TRY_RC != 0 )) && 
		    exit 1
		#
		:   On a successful varyoff, set the fence height to allow read only
		:   access.  This should preserve the volume group from inadvertent
		:   modification by this node.
		#
		# cel_f15: generated stanza, presumably sets the fence height
cel_f15
	    fi
	    #
	    :   On a successful varyoff, set the fence height to allow read only
	    :   access.  This should preserve the volume group from inadvertent
	    :   modification by this node.
	    #
	    # cel_f16: generated stanza, presumably sets the fence height
cel_f16
	    ;;
	S)  #
	    :	The volume group $DVG was originally active.  If we removed the reserves
	    :	up above, do another varyon to put them back on node $CL_NODE
	    #
	    if [[ -n $CL_NODE && -n $I_NODES ]]
	    then
		# cel_f17: generated stanza, presumably re-runs varyonvg on
		# $CL_NODE to restore the reserves
cel_f17
		(( $TRY_RC != 0 )) && 
		    exit 1
	    fi
	    ;;
	C)  #
	    :	The volume group $DVG was in concurrent mode, nothing needs be
	    :	done
	    #
	    ;;
        O)  #
            :   The volume group $DVG was originally unknown and subsequently
            :   imported, nothing actually has to be done here
            #
            ;;
		esac
    fi
    if [[ $_REMOVED_VG_ == "true" && -n $I_NODES ]] ; then
	#
	:   The LVM operation resulted in the complete removal of the
	:   volume group $DVG, export it on the inactive nodes to get rid of their
	:   definitions, too.
	#
	NODE_LIST=$(IFS=, set -- $I_NODES ; print "$*")
	# cel_f18: generated stanza, presumably runs exportvg on each node
	# in NODE_LIST
cel_f18
	(( $TRY_RC != 0 )) && 
	    SAVE_RC=1
    fi
    return $SAVE_RC		#   Pass back any saved return code
}				#   End _vg_sync
if [[ -n $_DEBUG ]] && (( $_DEBUG == 9 ))
then
    set -x
fi
#
:   Initialize the state used while collecting the command line arguments
:   that need to be processed by this script
#
typeset -i LOG_PARTITIONS=1	# number of partitions in the log logical volume
SVG_TYPE=2			# LVM volume group type for a scalable VG
DVG=""				# volume group name (decoded, readable form)
VG=""				# volume group name, encoded
FS_SIZE=""			# requested file system size
FACTOR=""			# size multiplier: 512 byte blocks, Megabyte, Gigabyte
FS_TYPE=""			# file system type
INLINE_LOG=""			# in-line log for JFS2 file system
LOG_NAME=""			# name of log logical volume
CRFS_PASS_OPTS=""		# modified options passed to crfs
# Build the getopts option string from _OPT_STR: the C-SPOC framework uses
# '^' where getopts wants ':' to mark options that take a value
_GET_OPT_ARGS=$(print $_OPT_STR | sed 's/\^/:/g')
# Parse the (clencodearg-encoded) command arguments; most OPTARG values must
# be run through cldecodearg before use
while getopts ":${_GET_OPT_ARGS}" gotten $_CMD_ARGS
do
    case $gotten in
	g ) VG=$OPTARG
	#
	:   Owning volume group
	#
	    DVG=$(print $VG | cldecodearg)
	    ;;
	l ) LOG_PARTITIONS=$(print $OPTARG | cldecodearg)
	#
	:   Number of partitions for log logical volume
	#
	    ;;
	m ) _MOUNT_POINT=$OPTARG
	#
	:   Capture mount point in case file system must be mounted
	:   after it is created
	#
	    DMP=$(print $_MOUNT_POINT | cldecodearg)
	    DMP=${DMP%%/}
	    if [[ $DMP != /* ]] 
	    then
		#
		:   If the mount point is not given as an absolute
		:   path name, turn it into one
		#
		PWD=$(pwd)
		if [[ $PWD == / ]] 
		then
		    DMP="/${DMP}"
		else
		    DMP="${PWD}/${DMP}"
		fi
	    fi
	    # Re-encode the (possibly rewritten) mount point before passing it on
	    _MOUNT_POINT=$(print $DMP | clencodearg)
	    CRFS_PASS_OPTS="$CRFS_PASS_OPTS -${gotten} $_MOUNT_POINT"
	    ;;
	a ) ATTRIBUTE=$(print $OPTARG | cldecodearg)
	#
	:   File system type jfs or jfs2 specific attributes
	#
	    if [[ $ATTRIBUTE == @(size=*) ]]
	    then
		#
		:   The file system size is extracted, and used to determine
		:   the size of the owning logical volume
		:   default is 512 byte blocks, and that a suffix of M will be
		:   used for Megabytes and G for Gigabytes.
		#
		# sed pulls the quoted value out of e.g. size="100M"
		FS_SIZE=$(print $ATTRIBUTE | sed -n 's/.*"\(.*\)".*/\1/p' )
	    elif [[ $ATTRIBUTE == @(quota=\"all\") ]]
	    then
		#
		:   Check if user selected "all" for Quota attribute. If yes then replace
		:   it with "userquota,groupquota" as is done in AIX SMIT.
		#
		QUOTAVAL=$(print $ATTRIBUTE | sed 's/quota="all"/quota="userquota,groupquota"/')
		CRFS_PASS_OPTS="$CRFS_PASS_OPTS -a "$(print $QUOTAVAL | clencodearg)
	    else
		if [[ $ATTRIBUTE == @(*INLINE*) ]]
		then
		    INLINE_LOG=INLINE
		fi
		if [[ $ATTRIBUTE == @(logname=*) ]]
		then
		    #
		    :	Pick up any user specified log name
		    #
		    LOG_NAME=$(print $ATTRIBUTE | sed -n 's/.*"\(.*\)".*/\1/p')
		fi
		if [[ $ATTRIBUTE == @(efs=*) ]]
		then
		    #
		    :	Pick up encrypted file system attribute
		    #
		    EFS=$(print $ATTRIBUTE | sed -n 's/.*"\(.*\)".*/\1/p' )
		fi
		#
		:   Attributes other than 'size' will be passed along to
		:   the crfs command
		#
		CRFS_PASS_OPTS="$CRFS_PASS_OPTS -${gotten} $OPTARG"
	    fi
	    ;;
	v ) FS_TYPE=$(print $OPTARG | cldecodearg)
	#
	:   File system type
	#
	    #
	    :	The only valid file system types are 'jfs' and 'jfs2'
	    #
	    if [[ $FS_TYPE != @(jfs|jfs2) ]]
	    then
		print $_USAGE
		exit 1
	    fi
	    CRFS_PASS_OPTS="$CRFS_PASS_OPTS -${gotten} $OPTARG"
	    ;;
	A ) CRFS_PASS_OPTS="$CRFS_PASS_OPTS -A $(print "no" | clencodearg)"
	#
	:   The automount option is always forced to 'no'
	#
	    ;;
	F ) FACTOR=$(print $OPTARG | cldecodearg)
	#
	:   Pick up any multiplicative factor - M, G - for file system size
	#
	    if [[ $FACTOR != 'M' && $FACTOR != 'G' ]]	# dummy to keep smit happy
	    then
		FACTOR=""
	    fi
	    ;;
	* ) CRFS_PASS_OPTS="$CRFS_PASS_OPTS -${gotten} $OPTARG"
	#
	:   Any other options are passed along unchanged
	#
	    ;;
    esac
done
#
:   Validate that we have a non-zero and valid file system size
#
if [[ -n $FS_SIZE && $FS_SIZE == +([0-9])?([MG]) ]] ; then
    #
    :	Separate the numeric file system size from a possible 'M' or 'G' suffix
    #
    SUFFIX=${FS_SIZE##+([0-9])}		# whatever follows the digits: '', 'M' or 'G'
    FS_SIZE=${FS_SIZE%"$SUFFIX"}	# just the leading digits
else
    #
    :	File system size must be a number, optionally followed by a 'M' or a 'G'
    #
    nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 44 \
    "${_CMD}: File system size '$FS_SIZE' is invalid.  It must be a number, optionally followed by a 'M' or a 'G'\n" ${_CMD} $FS_SIZE
    exit 1
fi
#
:   Operation should continue, even if some nodes are inaccessible
#
F_OPT=""				# default: no force flag passed along
if [[ -n $_SPOC_FORCE ]]
then
    F_OPT=" -f "			# force flag requested on the command line
fi
#
:   Processing for Encrypted File System
#
if [[ $EFS == "yes" ]]
then
    #
    :	Creating an Encrypted File System
    #
    KS_mode=$(clodmget -q "group=EFSKeyStore AND name=mode" -f value -n HACMPLDAP)
    if [[ -z $KS_mode ]]
    then
	#
	:   Location for key storage has not been specified
	#
	nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 50 \
	"${_CMD}: The key storage mechanism for PowerHA SystemMirror support of encrypted file systems has not been configured.  File system $DMP cannot be created as an encrypted file system.  Configure the key storage mechanism, and retry the operation.\n" ${_CMD} $DMP
	exit 1
    else
	#
	:   A key storage mechanism, $KS_mode, has been configured
	#
	if (( $KS_mode == 1 )) 
	then
	    #
	    :	LDAP is to be used for key storage.  Ensure that the LDAP server
	    :	has been specified.
	    #
	    LDAP_server=$(clodmget -q "group=LDAPServer AND name=ServerList" -f value -n HACMPLDAP)
	    if [[ -z $LDAP_server ]]
	    then
		#
		:   No LDAP server has been specified.
		#
		nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 51 \
		"${_CMD}: The key storage mechanism for PowerHA SystemMirror support of encrypted file systems has been set to 'LDAP' but no LDAP server has been configured.  File system $DMP cannot be created as an encrypted file system.  Configure an LDAP server, and retry the operation.\n" ${_CMD} $DMP
		exit 1
	    fi
	elif (( $KS_mode == 2 ))
	then
	    #
	    :	Shared file system is used for key storage.  In this case, 
	    :	the resource group for the new file system can be brought
	    :	online only after the resource group for the key storage.
	    #
	    EFS_RG=$(clodmget -q "value = $DVG" -f group -n HACMPresource)
	    if [[ -z $EFS_RG ]]
	    then
		# NOTE: a stray blank after the '\' here used to break the
		# line continuation, so the message text on the next line was
		# executed as a command instead of being passed to nls_msg
		nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 52 \
		"${_CMD}: The key storage mechanism for PowerHA SystemMirror support of encrypted file systems has been set to 'Shared File System'.  This requires that any resource group that holds an encrypted file system be brought on line after the resource group 'EFS_KeyStore'.  However, $DVG, the volume group holding $DMP, is not a member of a resource group.  Add $DVG to some resource group, and retry the operation.\n" ${_CMD} $DVG $DMP $DVG
		exit 1
	    else
		#
		:   If there is not already a dependency, add one
		#
		if ! LC_ALL=C clvt query dependency | grep -w "EFS_KeyStore" | grep -w $EFS_RG | grep -qw START_AFTER 2>/dev/null
		then
		    #
		    :   Set up the 'start after' attribute for $EFS_RG
		    #
		    if ! clvt add dependency START=$EFS_RG AFTER="EFS_KeyStore"
		    then
			nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 53 \
			"${_CMD}: The resource group $EFS_RG containing $DVG and $DMP could not be set to start after 'EFS_KeyStore'.\n" ${_CMD} $EFS_RG $DVG $DMP
			exit 1
		    fi
		fi
	    fi
	else
	    #
	    :	Unsupported key storage mechanism
	    #
	    nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 54 \
	    "${_CMD}: The configured key storage mechanism is not valid.\n" ${_CMD}
	    exit 1
	fi
    fi
fi
#
:   If this command was entered through SMIT, there has already been some
:   validation done to ensure that the parameters are reasonable.  That is
:   not necessarily the case if the command was entered directly.  So, the
:   check below determines that the volume group is not multiply defined
:   across the cluster
#
if [[ "$_CSPOC_CALLED_FROM_SMIT" != "true" ]]
then
    integer N_OF_NODE_GROUPS=0
    #
    :	Go find the lists of nodes on which this volume group is known.
    #
    if ! NODE_GROUPS=$(cllssharedvg $F_OPT -n $_TARGET_NODES -g $DVG 2>&1)
    then
        if [[ -z "$_SPOC_FORCE" ]]
	then
	    exit 1
        fi
    fi
    # NOTE(review): $NODE_GROUPS is expanded unquoted, so any multi-line
    # cllssharedvg output is joined into a single line before awk sees it —
    # confirm that is the intended record shape for the $NF/$2 test below.
    N_OF_NODE_GROUPS=$(print $NODE_GROUPS | awk '{ if ($NF < 4) print $2; }' | wc -w)
    #
    :	Volume group is multiply defined across the cluster
    #
    if (( $N_OF_NODE_GROUPS > 1 ))
    then
	# Typo fixed in the default message text: "consistant" -> "consistent"
	nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 5 \
	    "Volume Group name %s not unique among nodes, or volume group descriptions not consistent across nodes: %s\n" "$DVG" "$_TARGET_NODES"
	exit 1
    fi
    #
    :	Volume group is not known on the cluster
    #
    if (( $N_OF_NODE_GROUPS < 1 ))
    then
	nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 4 \
	    "Volume Group name %s not found on nodes %s\n" \
	    "$DVG" "$_TARGET_NODES"
	exit 1
    fi
fi
#
:   Determine the state of the volume group across the cluster, and come
:   up with a recommendation - CL_NODE - on which to run the command
#
# NOTE(review): _vg_status and _vg_active are CSPOC library functions found
# via FPATH; per the comments here they set CL_NODE (and presumably the
# VG_ACTIVE state consumed later) — confirm against the function library.
_vg_status
#
:   Find a node on which the volume group is active in order to make the
:   filesystem.  If its not active anywhere, make it active. "CL_NODE"
:   will contain the name of that node.
#
_vg_active
#
:   Volume group $DVG is now active on node $CL_NODE
#
#
:   Check to see if there are sites defined.
#
# More than one 'name =' stanza in HACMPsite means multiple sites are
# configured, so replicated (cross-site) volumes may be involved.
if (( 1 < $(odmget HACMPsite | grep -c -w 'name =' ) ))
then
    #
    :   There is more than one site defined, so verify that CSPOC operations
    :   on the replicated volumes will be successful on all nodes
    #
    _verify_replicated_volumes $DVG $CL_NODE "$I_NODES"
fi
#
:   Check here to see if mirror pool strictness is specified for this
:   volume group.  If so, any created logical volumes have to be assigned
:   to a mirror pool.
#
mp_strictness=""
mp_name=""
MP_COPY=""
#
:   Do an lqueryvg on $DVG, to see if it could support mirror pools
:   First, check to see if its a scalable volume group.
#
# The command string is encoded with clencodearg for transport to the remote
# node; cel_f19 is a generated CSPOC stub that runs it, leaving output in
# $try_out and the status in TRY_RC.
query_cmd=$(print -- 'LC_ALL=C lqueryvg -j -g $(/usr/sbin/getlvodm -v '$DVG') ' | clencodearg -e)
cel_f19
# The VG type is in the second colon-delimited field; strip everything up to
# the last white space character to isolate the bare type token.
vg_type=$(cut -f2 -d: $try_out)
vg_type=${vg_type##*[[:space:]]}
if [[ $vg_type == $SVG_TYPE ]]
then
    #
    :  If this is a scalable volume group, find out
    :  if the AIX level supports mirror pools
    #
    vg_type=$(is_mp_svg)
fi
if [[ $vg_type == ${SVG_TYPE}mp ]]
then
    #
    :   Volume group $DVG can support mirror pools.
    :   See if it has been configured for super strict
    :   mirror pools.
    #
    MPQ_CMD=$(print -- 'lqueryvg -g $(/usr/sbin/getlvodm -v '$DVG') -e' | clencodearg -e)
cel_f20
    #
    :	Check the retrieved mirror pool strictness value
    #
    # Non-empty and not 'n' means some form of strictness is configured:
    # 'y' is ordinary strict (one copy in the first pool); any other value
    # is treated as superstrict (a copy in every pool) below.
    mp_strictness=$(grep $CL_NODE $try_out | cut -f2- -d: | cut -f2- -d' ')
    if [[ -n $mp_strictness && $mp_strictness != n ]]
    then
	#
	:   Volume group $DVG has been defined with mirror pool strictness.
	:   Generate an appropriate expression to specify a mirror pool to
	:   put any new logical volumes on.
	#
	LSMP_CMD=$(print -- "LC_ALL=C lsmp -L $DVG" | /usr/es/sbin/cluster/cspoc/clencodearg -e)
cel_f21
		if (( $TRY_RC == 0 ))
	then
	    if [[ $mp_strictness == y ]]
	    then
		#
		:   Pick up the first mirror pool name for strict mirror pools
		#
		mp_name=$(sed -n '/MIRROR POOL:/s/.*MIRROR POOL:[ ]*\([^ ]*\).*/\1/p' $try_out | head -1)
		if [[ -n $mp_name ]]
		then
		    #
		    :   Generated mirror pool copy operand for mklv
		    #
		    MP_COPY='-p '$(print 'copy1='$mp_name | /usr/es/sbin/cluster/cspoc/clencodearg -e)
		fi
	    else
		#
		:   Superstrict mirror pools require a copy in each mirror pool
		#
		# Build one "-p copyN=<pool>" operand per mirror pool found in
		# the lsmp output, then add "-c <copies>" when more than one.
		integer copies=0
		MP_COPY=""
		for mp_name in $(sed -n '/MIRROR POOL:/s/.*MIRROR POOL:[ ]*\([^ ]*\).*/\1/p' $try_out )
		    do
			(( copies+=1 ))
			MP_COPY=${MP_COPY:+"${MP_COPY} "}'-p '$(print 'copy'$copies'='$mp_name | /usr/es/sbin/cluster/cspoc/clencodearg -e)
		    done
		if (( $copies > 1 ))
		then
		    MP_COPY="$MP_COPY -c $(print $copies | /usr/es/sbin/cluster/cspoc/clencodearg -e)"
		fi
	    fi
	fi
    fi
fi
# Bail out (restoring the volume group state) if any of the queries above
# left a failure status in TRY_RC.
(( $TRY_RC != 0 )) && {
   _RESTORE_STATE_AND_EXIT 1
}
#
:   The AIX crfs command will create a new logical volume, if one is not
:   passed to it.  While the name chosen will be unique on that node, it
:   would be unique across the cluster only by luck.  So, we generate
:   unique names for logical volume and log logical volume for this
:   filesystem, create those logical volumes, and tell crfs to use them.
:
:   First, collect all the logical names across the cluster
#
# cel_f22 is a generated CSPOC stub that runs E_GET_LV_COMMAND on the
# cluster nodes; the combined lsdev output lands in $try_out and the
# overall status in TRY_RC.
E_GET_LV_COMMAND="lsdev -C -c $(print logical_volume | clencodearg) -t $(print lvtype | clencodearg)"
cel_f22
if (( $TRY_RC != 0 ))
then
    #
    :	Any failures likely mean that there are some nodes that could not be
    :	checked.  This makes it impossible to guarantee that a cluster-unique
    :	name will be generated.
    #
    nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 6 \
	"Unable to reach all cluster nodes \n"
    if [[ -n $_SPOC_FORCE ]]			# "force" means do it anyway
    then
	nls_msg -2 -l ${cspoc_tmp_log} ${_MSET} 7 \
	    "Logical volume(s) created may not have unique name(s).\n"
    else
       _RESTORE_STATE_AND_EXIT 1
    fi
fi
#
:   Choose unique lv and loglv names by finding the highest numbered names
:   beginning with "lv" or "loglv" respectively and adding 1
#
# sed extracts the numeric suffix of every " lvNN" name in the cluster-wide
# lsdev output; sort -n | tail -1 keeps the highest number in use.
integer MAX_LVXX_NUMBER=$(sed -n 's/.* lv\([0-9]*\).*/\1/p' $try_out | sort -n | tail -1)
[[ -z $MAX_LVXX_NUMBER ]] &&
    MAX_LVXX_NUMBER=0
if (( $MAX_LVXX_NUMBER < 9 )) 
then
    # ksh 'typeset -Z2' zero-fills to two digits, so the generated name is
    # e.g. "lv05" rather than "lv5"; skipped once the count reaches 9 since
    # 9+1 is already two digits.
    typeset -Z2 MAX_LVXX_NUMBER
fi
MAX_LVXX_NUMBER=$(( $MAX_LVXX_NUMBER + 1))
CLSTR_UNIQ_FS_LV=lv${MAX_LVXX_NUMBER} 
# Same scheme for the log logical volume name, "loglvNN"
integer MAX_LOGLVXX_NUMBER=$(sed -n 's/.* loglv\([0-9]*\).*/\1/p' $try_out | sort -n | tail -1)
[[ -z $MAX_LOGLVXX_NUMBER ]] &&
    MAX_LOGLVXX_NUMBER=0
if (( $MAX_LOGLVXX_NUMBER < 9 )) 
then
    typeset -Z2 MAX_LOGLVXX_NUMBER
fi
MAX_LOGLVXX_NUMBER=$(( $MAX_LOGLVXX_NUMBER + 1))
CLSTR_UNIQ_LOG_LV=loglv${MAX_LOGLVXX_NUMBER}
#
:   Encode the log_lv and lv names thus created
#
E_FS_LV=$(print $CLSTR_UNIQ_FS_LV  | clencodearg)	# lv name
E_LOG_LV=$(print $CLSTR_UNIQ_LOG_LV | clencodearg)	# log lv name
E_JFS=$(print $FS_TYPE | clencodearg )			# jfs or jfs2
DLV=$CLSTR_UNIQ_FS_LV
#
:   Check here to make sure that the mount point is not already in use
#
# The awk program exits non-zero if $DMP appears as a stanza name in
# /etc/filesystems; cel_f23 runs it across the cluster, so a non-zero
# TRY_RC below means the mount point is already in use somewhere.
CHK_CMD=$(print "awk '\$1==\"${DMP}:\"{exit 1}' /etc/filesystems" | clencodearg)
cel_f23
(( $TRY_RC != 0 )) && {
   _RESTORE_STATE_AND_EXIT 1
}
#
:   Obtain the partition size in use on that volume group, so we can
:   determine how many are going to be need in the logical volume we are
:   going to create.  This is required by the fact that logical volumes
:   are defined in terms of number of partitions, and file systems are
:   defined in terms of 512 byte blocks
#
integer PARTITION_SIZE=1
# cel_f24 runs the encoded lsvg command remotely; output goes to $try_out.
LSVG_CMD=$(print "LANG=C lsvg $DVG" | clencodearg -e)
cel_f24
(( $TRY_RC != 0 )) && {
    #
    :	If the lsvg command above fails, at least attempt to restore the
    :	volume group to its original state before exiting.
    #
   _RESTORE_STATE_AND_EXIT 1
}
#
:   Extract the partition size from the lsvg output
#
# Physical partition size as reported on the "PP SIZE:" line, in megabytes.
integer PARTITION_SIZE=$(sed -n '/PP SIZE:/s/.* *PP SIZE: *\([^ ]*\) *.*/\1/p' $try_out)
#
:   Convert given file system size into 512 byte blocks
#
# NOTE(review): SUFFIX/FACTOR and FS_SIZE are set earlier in the script
# (outside this view) from the user-supplied size argument — confirm their
# origin there.
if [[ $SUFFIX == "M" || $FACTOR == "M" ]]
then
   #
   :   size is specified in Megabytes, convert to 512 byte blocks
   #
   ((FS_SIZE *= 1024*2))
elif [[ $SUFFIX == "G" || $FACTOR == "G" ]]
then
   #
   :   size is specified in Gigabytes, convert to 512 byte blocks
   #
   ((FS_SIZE *= 1024*1024*2))
fi
#
:	Compute number of partitions:
:	partition sizes are measured in megabytes while
:	filesystem sizes are measured in 512 byte blocks
:	there are 2048 512-byte blocks in a megabyte
#
# Round the partition count up when the size is not an exact multiple.
integer DIVISOR=2048*$PARTITION_SIZE
integer NUM_OF_PARTITIONS=$FS_SIZE/$DIVISOR
integer remainder=$FS_SIZE%$DIVISOR
(( $remainder > 0 )) &&
   NUM_OF_PARTITIONS=$(( $NUM_OF_PARTITIONS + 1 ))
E_NUM_PARTITIONS=$(print $NUM_OF_PARTITIONS | clencodearg )
#
:   Except for JFS2 file systems with in-line logs, there must be a log
:   logical volume defined in the volume group.  If an explicit log file
:   name is given, it is just passed through to crfs, which will handle
:   any problems.
#
if [[ $FS_TYPE == jfs || $INLINE_LOG != "INLINE" ]]
then
    #
    :	Check the volume group to see if it contains a log logical volume of
    :	the appropriate type
    #
    # cel_f25 is a generated CSPOC stub; the list of log logical volumes in
    # the volume group lands in $try_out, status in TRY_RC.
cel_f25
    #
    :	If there was no log logical volume in that volume group, we have to
    :	create one
    #
    if (( $TRY_RC != 0 )) ||
       [[ ! -s $try_out ]] ||			 # didn't find a loglv name, or
       [[ -n $LOG_NAME && -z $(grep -w $LOG_NAME $try_out) ]] ||    # LOG_NAME is passed in
       [[ -z $(while read node lv type rest ; do # its really not the right type
		    if [[ $type == ${FS_TYPE}log ]] 
		    then
			print $lv
			break
		    fi
		done < $try_out) ]] 
    then
	[[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) &&
	    print "${_CMD}: log logical volume not found -- will create"
	#
	:   Construct an appropriate command to create the log logical
	:   volume, and run it on the command node
	#
        if [[ -n $LOG_NAME && -z $(grep -w $LOG_NAME $try_out) ]]       # when LOG_NAME is passed in but not exist
        then
	     # Typo fixed in the message text: "Volumn" -> "Volume"
             print "${_CMD} Failed: Log Logical Volume $LOG_NAME must have been created. Please create $LOG_NAME Or omit Log Logical Volume name option."
             exit  1
        fi
	# Reset TRY_RC: the failed lookup above is expected when no log
	# logical volume exists yet; cel_f26 (mklv) sets it afresh.
	TRY_RC=0
	E_JFSLOG=$(print ${FS_TYPE}log | clencodearg )
	E_LOG_PARTITIONS=$(print $LOG_PARTITIONS | clencodearg)
cel_f26
	(( $TRY_RC != 0 )) && {
	    #
	    :	If the mklv command for the log logical volume fails, restore
	    :	the volume group to its original state before exiting.
	    #
	    _RESTORE_STATE_AND_EXIT 1
        }
	[[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && {
	     print "${_CMD}: Log Logical Volume $CLSTR_UNIQ_LOG_LV created"
	     print "${_CMD}:    on node $CL_NODE"
	}
	#
	:   Having created the log logical volume, now format it for use
	:   by the filesystem
	#
	# logform prompts for confirmation, hence the piped "y".
	FORMAT_COMMAND="echo y | logform -V $FS_TYPE /dev/$CLSTR_UNIQ_LOG_LV"
	E_FORMAT_COMMAND=$(print $FORMAT_COMMAND | clencodearg)
cel_f27
	(( $TRY_RC != 0 )) && {
	    #
	    :	If the format of the log logical volume fails, restore the
	    :	volume group to its original state before exiting.
	    #
	    _RESTORE_STATE_AND_EXIT 1
        }
	[[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && {
	    print "${_CMD}: Log Logical Volume $CLSTR_UNIQ_LOG_LV formatted"
	    print "${_CMD}:   on node $CL_NODE"
	}
    fi   				# end of create and format log lv
fi					# end of not JFS2 with inline log
#
:   Create the logical volume that will hold the new file system
#
# cel_f28 runs the mklv for $CLSTR_UNIQ_FS_LV (command assembled by the
# generator); TRY_RC carries the result.
cel_f28
(( $TRY_RC != 0 )) && {
    #
    :	If the mklv for the logical volume to hold the filesystem fails,
    :	restore the volume group to its original state before exiting.
    #
    _RESTORE_STATE_AND_EXIT 1
}
# Retrieving the volume group encryption state
isVG_encrypted=$(LC_ALL=C lsvg $DVG | grep ENCRYPTION | awk -F":" '{print $2}')
# Trimming the encrypted value
# (ksh extended pattern "+( )" matches one or more blanks, so these two
# expansions strip leading and trailing blanks respectively)
isVG_encrypted=${isVG_encrypted##+( )}
isVG_encrypted=${isVG_encrypted%%+( )}
#
:  Showing warning message to the user to enable the logical volume level encryption when encryption is enabled in volume group level.
:  Data encryption enables only when both VG and LV level encryption are enabled.
#
if [[ $isVG_encrypted == "yes" ]];then
      dspmsg -s 32 cspoc.cat 6 "\nWARNING: Encryption for volume group \"%1\$s\" is enabled, but the logical volume \"%2\$s\" is not encrypted.\n\
To enable the encryption for logical volume,\n\
You can run \"%3\$s %2\$s [...]\" or\n\
\"use Change a Logical Volume from %4\$s menu\".\n" $DVG $DLV "clmgr modify lv" "smitty cl_lv"
fi
[[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && {
    print "${_CMD}: File System Logical Volume $CLSTR_UNIQ_FS_LV created"
    print "${_CMD}:   on node $CL_NODE"
}
#
:   Now, we have enough to create the FS on the selected node
#
# cel_f29 runs the crfs command (assembled by the generator) on $CL_NODE;
# TRY_RC carries the result and drives the success/failure paths below.
cel_f29
[[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) && {
    print "${_CMD}: File System $FS_NAME created"
    print "${_CMD}:   on node $CL_NODE"
}
if (( $TRY_RC == 0 ))
then
    #
    :	The crfs was successful.  Now, if the volume group is in a resource
    :	group for which 'mount all filesystems' was specified, mount it now
    #
    if [[ $VG_ACTIVE != @(I|P) ]]		# only already vary'd on
    then
    	if [[ -z $_RES_GRP ]] 		# resource group not specified
	then
	    #
	    :	If we do not already have the resource group, determine it from
	    :	the volume group
	    #
	    _RES_GRP=$(clodmget -q "value=$DVG" -f group -n HACMPresource)
	fi
	[[ -n $_DEBUG ]] && (( $_DEBUG > 4 )) &&
	    print "DEBUG: _RES_GRP = $_RES_GRP\n"
	if [[ -n $_RES_GRP ]]
	then
	    #
	    :   If mount all defined in the resource group, mount the new file
	    :   system.
	    #
	    _MOUNT_ALL=$(odmget -q "group=$_RES_GRP AND name=FILESYSTEM AND value=ALL" \
		       HACMPresource)
	    if [[ -n $_MOUNT_ALL ]]
	    then
		# cel_f30 mounts the newly created file system.
cel_f30
	    		(( $TRY_RC != 0 )) && {
		    #
		    :   Despite having successfully created the file system,
		    :   the mount failed.  At least put the volume group back
		    :   into its original state before leaving
		    #
		    _RESTORE_STATE_AND_EXIT 1
		}
                if [[ $FS_TYPE == "jfs2" ]]
                then
                    #
                    :   Turn on mountguard, if it is supported
                    #
                    integer V R M F
                    typeset -Z2 R                       # two digit release
                    typeset -Z3 M                       # three digit modification
                    typeset -Z3 F                       # three digit fix
                    integer VRMF=0
                     LSLPP_CMD=$(print -- 'lslpp -lcqOr bos.rte.filesystem | cut -f3 -d:' | clencodearg)
cel_f31
                    (( $TRY_RC != 0 )) && {
                        _RESTORE_STATE_AND_EXIT 1
                    }
                    # ksh runs the last stage of a pipeline in the current
                    # shell, so this read populates V R M F here (this
                    # idiom would not work in bash).
                    cut -f2 -d: $try_out | IFS=. read V R M F
                    VRMF=$V$R$M$F
                    # mountguard requires at least the AIX levels encoded
                    # in these VRMF comparisons (6.1.7.0 / 7.1.1.0).
                    if (( $V == 6 && $VRMF >= 601007000 )) ||
                       (( $V == 7 && $VRMF >= 701001000 )) ||
                       (( $V > 7 ))
                    then
                        #
                        :   AIX is at the right level, try to turn on mountguard
                        #
                        MOUNTGUARD=$(print -- "chfs -a mountguard=yes $DMP" | clencodearg -e)
cel_f32
                    fi
                fi
	    fi
	fi
    fi
    #
    :	If there are passive nodes, they did not pick up the filesystem change,
    :	since LVM only updates its own information across the cluster, not the
    :	filesystem.  So, drive an update here.
    #
    if [[ -n $P_NODES ]] 
    then
	# Briefly lower the fence, switch the VG to active mode, import the
	# new file system stanza with imfs, then return to passive mode and
	# restore the fence — all in one remote command string.
	FS_UPDATE_CMD=$(echo "cl_set_vg_fence_height -c $DVG rw ; \
			      varyonvg -n -c -A $DVG ; \
			      imfs -l $CLSTR_UNIQ_FS_LV ; \
			      varyonvg -n -c -P $DVG ; \
			      cl_set_vg_fence_height -c $DVG ro " | \
			clencodearg)
cel_f33
	(( $TRY_RC != 0 )) && {
	    _RESTORE_STATE_AND_EXIT 1
        }
    fi					# end update passive nodes
    #
    :	Update the definitions across the cluster
    #
    _IMPORT_RESTORE_EXIT 0
else
    #
    :	The crfs command failed.  Put the volume group back into its
    :	original state.
    #
    _RESTORE_STATE_AND_EXIT 1
fi

