#!/usr/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2018,2019,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r720 src/43haes/usr/sbin/cluster/sa/db2/sbin/cl_db2preimport.sh 1.23 
#  
# Licensed Materials - Property of IBM 
#  
# Restricted Materials of IBM 
#  
# COPYRIGHT International Business Machines Corp. 2005,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
#  @(#)  7d4c34b 43haes/usr/sbin/cluster/sa/db2/sbin/cl_db2preimport.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM
## 
## NAME:	cl_db2preimport
##
## DESCRIPTION:
##			This script performs prevalidation on the user selected values
##			to ensure the selected instance, database, and other attributes
##			are appropriate for the environment.  This script is always called
##			via SMIT either from the Add an Instance, or via the Mutual Takeover
##			assistant.
##
##			In addition the service IP label network is selected and provided
##			as an argument to the cl_db2import script.
##
## ARGUMENTS:
##			[ -T ] - Perform takeover discovery
##			[ -l ServiceLabel ]
##			[ -p prefix/netmask ]
##			[ -d Database ]
##			[ -i InstanceName ]
##			[ -o PrimaryNode ]
##			[ -n TakeoverNodes ]
##			[ -M ] - Modify Mode
##
##
##---------------------------------------------------------------------------

# Pull in the ksh93 function-include helper shared by the Smart Assist scripts
. /usr/es/lib/ksh93/func_include
version='1.23'


#----------------------------------------------------------------------------
# Global Definitions
#----------------------------------------------------------------------------

# Set the FPATH for all DB2 / HACMP functions
FLIB=/usr/es/lib/ksh93
FPATH=$FLIB/utils:$FLIB/hacmp:$FLIB/db2:$FLIB/db2/vg:$FLIB/util:$FLIB/util/list:$FLIB/aix/:$FLIB/aix/odm/
# Make the DB2 Smart Assist and HACMP utility commands reachable by name
PATH=$PATH:/usr/es/sbin/cluster/sa/db2/sbin/:/usr/es/sbin/cluster/utilities/:/usr/es/sbin/cluster/:/usr/es/sbin/cluster/sa/sbin/

DB2SA_BIN_DIR=/usr/es/sbin/cluster/sa/db2/sbin
DB2_ETC_PATH=/usr/es/sbin/cluster/sa/db2/etc
TAKEOVER_DISCOVERY=false	# -T: run discovery on the takeover nodes first
FLAGS=""			# flags forwarded to cl_db2import (-M / -F / -S)
NETWORK=""			# -N: service network; discovered below when not supplied
typeset MANUAL_CONFIG=false #Manual Configuration mode
typeset CONFIG_FILE=""      # -f: XML configuration file for manual mode

#----------------------------------------------------------------------------
# Functions:
# 	check_userexit
#	check_sharedvgs
#	check_vgs_unique
#	check_inactive_instance		
#	check_service_ip
#	instance_validation

#----------------------------------------------------------------------------
#
# Function:		check_service_ip
#
# Description: 	Determine if the service IP or any IP label is resolvable
#				on the local node (runs host IP label)
#
# Arguments:	ip label - label to check
#
# Returns:		0 - host was resolved
#				1 - host was not resolved
#
function check_service_ip {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	# Resolve the label passed as $1 on the local node via "host".
	# BUGFIX: the original ignored its argument and always tested the
	# global $SERVICE_LABEL; the function now honors its parameter
	# (all visible callers pass $SERVICE_LABEL, so behavior at the
	# call sites is unchanged).
	typeset service_ip=$1
	errmsg 7 $service_ip
	host $service_ip >/dev/null 2>&1 || {
		# Failed - the label is not resolvable on this node
		errmsg 200
		return 1
	}
	# Passed
	errmsg 100
}

#----------------------------------------------------------------------------
#
# Function:			check_userexit
#
# Description:		If the user has enabled the userexit option for the
#					DB2 database and logretain is set to RECOVERY, then
#					the user must have provided a value for either the
#					archive logpath, the retrieve logpath or the audit
#					error logpath.  If none of the environment variables
#					are set in the db2profile, or userprofile files for
#					the instance then log an error message and indicate
#					that these values must be provided
#
# Arguments:		instance - scalar name of the DB2 UDB instance to verify
#
# Returns:			0 on success
#					1 on failure
#
function check_userexit {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	# Instance name is required; keep it (and the scratch path vars)
	# function-local instead of leaking globals.
	typeset instance=$1

	if [[ -z $instance ]]; then
		errmsg 200
		return 1
	fi

	typeset databases=$(KLIB_DB2_disc_get_instance_value $instance "DATABASES")
	typeset userexit
	typeset logretain
	typeset archive_path retrieve_path audit_error_path

	# For each database defined to this instance, validate that the instance
	# database if configured with logretain=recovery and userexit=on has one
	# of the paths set in the environment such that discovery can determine
	# the volume group associated with recovery.
	#
	# DATABASES is a comma separated list; split it with a native ksh93
	# expansion instead of forking echo | sed.
	for db in ${databases//,/ }; do
		logretain=$(KLIB_DB2_disc_get_instance_value $instance "DB_CFG_${db}_LOGRETAIN")
		userexit=$(KLIB_DB2_disc_get_instance_value $instance "DB_CFG_${db}_USEREXIT")
		if [[ "$logretain" == "RECOVERY" && "$userexit" == "ON" ]]; then
			# One of the variables ARCHIVE_PATH, RETRIEVE_PATH, or AUDIT_ERROR_PATH
			# must be set
			archive_path=$(KLIB_DB2_disc_get_instance_value $instance "DBM_ENV_ARCHIVE_PATH")
			retrieve_path=$(KLIB_DB2_disc_get_instance_value $instance "DBM_ENV_RETRIEVE_PATH")
			audit_error_path=$(KLIB_DB2_disc_get_instance_value $instance "DBM_ENV_AUDIT_ERROR_PATH")
			if [[ -z $archive_path && -z $retrieve_path && -z $audit_error_path ]]; then
				errmsg 200
				# BUGFIX: the original passed the literal word
				# "instance" to abort; pass the instance name,
				# matching every sibling abort call.
				abort 16 $instance $db
			fi
		fi
	done
	errmsg 100
	return 0
}

#----------------------------------------------------------------------------
#
# Name:		check_shared_vgs
#
# Description:
#		Validate that the shared volume groups are accessible from all of the
#		takeover nodes, and that the shared VG is actually shared amongst all
#		of the nodes.
#
# Arguments:
#		instance name - scalar
#		nodes - by reference, list of the cluster nodenames to verify
#		
# Returns:
#		0 - on success
#		1 - on failure
#
function check_shared_vgs {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset instance=$1
	# $2 is the NAME of the caller's node-list variable (ksh93 nameref)
	typeset -n nodes=$2
    typeset pvids

	typeset homevg=$(KLIB_DB2_disc_get_instance_value $instance "DB2HOME_VG")
	typeset othervgs=$(KLIB_DB2_disc_get_instance_value $instance "OTHERVGS")
	typeset source_node=$(KLIB_DB2_disc_get_instance_value $instance "CLUSTER_NODE")
	# Associative array used to avoid re-checking a VG listed twice
	typeset -A checked_vgs
	typeset vgs="$homevg $othervgs"

	# if the home volume group wasn't discovered, abort
	if [[ -z $homevg ]]; then
		abort 9 $instance
	fi

	#
        # In some configuration (example: SAP Smart Assist which internally
        # uses DB2 Smart Assist) instance home dir could be on rootvg or
        # non-shared VG. In such configuration a warning msg would be
        # thrown to let user make sure that instance home dir should be
        # configured and available on other nodes.
        #
	[[ "$homevg" == "rootvg" ]] && {
                errmsg 27 "$instance" "rootvg"
                # re-typeset simply reassigns the function-local vgs,
                # dropping rootvg from the list that gets PVID-checked
                typeset vgs="$othervgs"
        }

	# check each VG to ensure all nodes have access via PVID to the vg
	for vg in $vgs; do

		# skip VGs already validated in an earlier iteration
		if [[ -n ${checked_vgs[$vg]} ]]; then
			continue
		fi
		checked_vgs[$vg]=:
		errmsg 8 $vg
	
		# Now check the other nodes specified as an argument to this function
		# at least one node should be capable of sharing this vg.
		for node in $nodes; do
			if [[ "$node" != "$source_node" ]]; then
				# compare the PVIDs of the VG on source vs takeover
				# node; a mismatched home VG only warns (errmsg 27),
				# any other mismatched VG is fatal (abort 10)
                		KLIB_DB2_VG_node_pvid_compare $source_node $node $vg || {
					if [[ "$vg" == "$homevg" ]]; then
						errmsg 27 "$instance" "$homevg"
					else
						errmsg 200
						abort 10 "$vg" "$source_node" "$node" "$instance"
					fi
				}
                	fi
        	done
		errmsg 100
	done
	return 0
}

#----------------------------------------------------------------------------
#
# Name:		check_vgs_unique
#
# Description:	Validate that the DB2 instance volume groups are unique
#		meaning no other DB2 instance uses those volume groups
#
# Arguments:	instance name - scalar (instance to validate)
#
# Returns:	0 - on success
#		1 - on failure
#
function check_vgs_unique {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset instance=$1
	# $2 is the NAME of the caller's node-list variable (ksh93 nameref)
	typeset -n nodes=$2
        typeset source_node=$(KLIB_DB2_disc_get_instance_value $instance "CLUSTER_NODE")

	typeset inst
	errmsg 21 $instance
	if [[ -z $instance ]]; then
		errmsg 200
		return 1
	fi

	typeset OTHERVGS=$(KLIB_DB2_disc_get_instance_value $instance OTHERVGS)
	typeset HOMEVG=$(KLIB_DB2_disc_get_instance_value $instance DB2HOME_VG)
	
	typeset VGS="$OTHERVGS $HOMEVG"

	# If the home VG's PVIDs are not shared with some takeover node
	# (e.g. home dir on rootvg / non-shared VG), drop the home VG from
	# the uniqueness check -- same relaxation check_shared_vgs applies.
	for node in $nodes; do
                if [[ "$node" != "$source_node" ]]; then
                        KLIB_DB2_VG_node_pvid_compare $source_node $node $HOMEVG || {
                                typeset VGS="$OTHERVGS"
                                break
                        }
                fi
        done

	# Compare our VG list against every other discovered instance; any
	# overlap means the instances cannot fail over independently.
	# DB2_INSTANCES comes from the sourced db2.disc discovery file.
	for inst in $DB2_INSTANCES; do

		# If this is not our instance, look to see if any of our VGs match
		shared_vgs=
		if [[ "$inst" != "$instance" ]]; then
			OTHERVGS=$(KLIB_DB2_disc_get_instance_value $inst OTHERVGS)
			HOMEVG=$(KLIB_DB2_disc_get_instance_value $inst DB2HOME_VG)

			for vgA in $VGS; do
				for vgB in $HOMEVG $OTHERVGS; do
					if [[ "$vgA" == "$vgB" ]]; then
						shared_vgs="$vgB $shared_vgs"
					fi
				done
			done
			if [[ -n $shared_vgs ]]; then
				errmsg 200
				# shared_vgs is passed by name (!var); presumably the
				# abort helper expands it -- same convention as the
				# errmsg 13 !PARTICIPATING_NODES call in main
				abort 20 $instance !shared_vgs $inst
			fi
		fi
	done

	errmsg 100
	return 0
}


#----------------------------------------------------------------------------
#
# Name:         check_inactive_instance
#
# Description:  Checks for signs of an inactive DB2 instance
#               if the DB2 DB discovery information is blank for all DB2
#               Databases discovered, then report a warning indicating the
#               user may need to add any additional volume groups beyond the
#               instance home volume group.
#
# Arguments:    instance - instance name
#
# Returns:      1 - the instance is inactive
#               0 - the instance was active
#
function check_inactive_instance {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset instance="$1"
	typeset databases
	typeset -i nonempty_vals=0

	# No instance, or no databases discovered for it: report inactive
	[[ -z $instance ]] && return 1
	databases=$(KLIB_DB2_disc_get_instance_value $instance DATABASES)
	[[ -z $databases ]] && return 1

	# Count the non-blank per-database discovery values across every
	# database (comma separated list) and every DB2_DB_VARIABLES entry.
	for db in ${databases//,/ }; do
		for val in $DB2_DB_VARIABLES; do
			token="DB_CFG_${db}_${val}"
			dbval=$(KLIB_DB2_disc_get_instance_value $instance $token)
			[[ -n $dbval ]] && nonempty_vals=$(( nonempty_vals + 1 ))
		done
	done

	# Every discovery value blank: no DB2 DB was online, so the instance
	# must have been offline during discovery -- warn (errmsg 26) that
	# VGs beyond the home VG may need to be added by hand.
	if (( nonempty_vals == 0 )); then
		typeset vg=$(KLIB_DB2_disc_get_instance_value $instance DB2HOME_VG)
		errmsg 26 $instance $vg
	fi
}

#----------------------------------------------------------------------------
#
# Name: instance_validation
#
# Description:
#		This script will validate the various components of DB2
#       to ensure the DB2 instance meets the following criteria
#
#       *  DB2 Instance Home Directory Must reside on a filesystem
#          that is on shared storage (shared VG)
#
#		*  The DB2 instance type must be UDB (non-partitioned), and
#		   not DPF.
#		
#		*  Database selected must exist in the selected DB2 instance 
#
# 		*  If the database variable USEREXIT=ON and LOGRETAIN=RECOVERY
#		   then the DBM_ENV_ variables ARCHIVE_PATH, RETRIEVE_PATH and
#		   AUDIT_ERROR_PATH must be set in the db2profile or userprofile
#		   files.  All of this information is contained within the
#		   DB2 discovery file for each instance discovered
#
#       *  Shared volume groups must be accessible on all nodes
#          where a particular instance might reside in the cluster
#          This requires all hdisks for the instance VG(s) to exist
#          on all participating nodes.
#
function instance_validation {
	[[ "$VERBOSE_LOGGING" == "high" ]] && set -x
	typeset instance="$1"
	typeset database_to_monitor="$2"
	# $3 is the NAME of the caller's node-list variable (ksh93 nameref);
	# it is forwarded by name to check_shared_vgs / check_vgs_unique
	typeset -n nodes=$3

	# What type of DB2 instance is this, if its DPF (partitioned), then abort
	errmsg 6 $instance
	typeset dbtype=$(KLIB_DB2_disc_get_instance_value $instance "INSTANCE_TYPE")
	[[ "$dbtype" == "DPF" ]] && {
		errmsg 200
		abort 5 $instance
	}
	errmsg 100

	# Check to see if the DB2 instance is active
    # if the DB2 instance is inactive then we've potentially missed
    # some of the shared volume groups (tablespaces, logs, etc) that
    # the DB2 instance relies on.  Report a warning indicating that additional
	# volume groups may need to be added by hand, if the instance relies on
    # more vgs that just the instance home vg.
    #
    check_inactive_instance $instance

	# Validate the USEREXIT/LOGRETAIN log archiving prerequisites
	errmsg 15 " $instance"
	check_userexit $instance 

    #
    # Check to ensure all of the volume groups marked as the home VG
    # have the same set of pvids on one or more cluster nodes the user
    # specified. The pvids do need to be accessible, but the VG is not
    # required to be imported.
    #
    # Using the VG / pvid info generate a list of nodes that
    # the instance could be imported on
    #
    check_shared_vgs $instance nodes

	#
	# Check to make sure volume groups are unique amonst this DB2 instace
	# meaning there are no other discovered DB2 instances with one or more
	# of the same volume groups.
	# 
	check_vgs_unique $instance nodes

	#
	# Check to ensure the database selected exists in the selected DB2 instance
	# (database_to_monitor may be a comma separated list)
	#
        for db in $(echo $database_to_monitor | sed "s/,/ /g"); do
	    errmsg 17 "$instance" "$db"
	    found=0
            
	    DATABASES=$(KLIB_DB2_disc_get_instance_value $instance DATABASES)
	    for dbname in $DATABASES; do
		if [[ "$dbname" == "$db" ]]; then
		    found=1
                    errmsg 100
		    break
                fi
	    done
	    
	    # If the database was not found, report an error
	    if (( found == 0 )); then
		errmsg 200
		abort 18 $db $instance
	    fi
        done
        
	return 0
}


#----------------------------------------------------------------------------
#
# Name:         importConfigFromFile
#
# Description:
#               This function will read the supplid config file and create HACMP
#               resources to configure DB2 Database for HA.
#
# Arguments:
#               None
#
# Returns:
#               0 - on success
#               1 - on failure
#

importConfigFromFile() {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
     
    # NOTE(review): the application name and component id are hardcoded
    # here instead of being read from the XML file -- confirm this is
    # intended for every manually configured instance.
    APPLICATION_NAME="db2inst1_APP"
    COMPONENT_ID=DB2_NON_DPF_SINGLE

    # Optional DSE install directory; exported for downstream scripts
    # when supplied.  clsaxmlutil validates CONFIG_FILE against the
    # schema and prints the requested tag (as "tag=value" further below).
    dse_dir=$(clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t DSE_INSTALL_DIR)
    (( $? != 0 )) && {
        abort 201
    }
    if [[ -z $dse_dir ]]; then
        errmsg 202
    else 
        export DSE_INSTALL_DIR=$dse_dir
    fi

    # Primary node: must be a member of the cluster (clnodename list)
    primary_node=$(clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t PrimaryNode)
    (( $? != 0 )) && {
         abort 201
    }
    # strip the leading "tag=" from the clsaxmlutil output
    primary_node=$(echo $primary_node | cut -d"=" -f2)
    found=0
    for node in $(/usr/es/sbin/cluster/utilities/clnodename)
    do
        [[ $node == $primary_node ]] && {
            found=1
        }
    done
    (( $found == 0 )) && {
        abort 203 $primary_node
    }
    PRIMARY_NODE=$primary_node

    # Takeover nodes: comma separated in the XML; every one must be a
    # cluster member.  (Local name "takover_nodes" is misspelled, sic.)
    takover_nodes=$(clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t TakeoverNodes)
    (( $? != 0 )) && {
         abort 201
    }
    takover_nodes=$(echo $takover_nodes | cut -d"=" -f2)
    takeover_nodes=$(echo $takover_nodes  | tr ',' ' ')
    found=1
    for tnode in $takeover_nodes
    do
        flag=0
        for node in $(/usr/es/sbin/cluster/utilities/clnodename)
        do
            [[ $node == $tnode ]] && {
                flag=1
            }
        done
        (( $flag == 0 )) && {
            found=0
        }
    done

    (( $found == 0 )) && {
        abort 204 $takover_nodes
    }
    TAKEOVER_NODES=$takeover_nodes
    # colon-tokenized copy for passing the list as a single word
    TAKEOVER_NODES_TOKENIZED=$(echo $takeover_nodes | sed -e "s/ /\:/g")

    # DB2 instance name
    inst=$(clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t INSTANCE_NAME)
    (( $? != 0 )) && {
         abort 201
    }

    inst=$(echo $inst | cut -d "=" -f2)
    INSTANCE=$inst

    # Database to monitor
    db=$(clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t DATABASE_NAME)
    (( $? != 0 )) && {
         abort 201
    }
    db=$(echo $db | cut -d"=" -f2)

    DATABASE_NAME=$db

    # Service IP label(s) from the DB2_SERVICE_IPS element
    addrs=$(clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t DB2_SERVICE_IPS | grep "DB2_SERVICE_IP.IPAddress_or_name")
    (( $? != 0 )) && {
         abort 201
    }

    addrs=$(echo $addrs | cut -d"=" -f2)

    #
    : Find out Service Network information from supplied XML file
    #
    NETWORK=$(clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t Service_Network)
    NETWORK=$(echo $NETWORK | cut -d"=" -f2)

    SERVICE_LABEL=$addrs

    # NOTE(review): -S matches the "second resource group in mutual
    # configuration" getopts flag; confirm it is always correct here.
    FLAGS=-S
    # Run discovery for the primary node before importing
    /usr/es/sbin/cluster/sa/db2/sbin/cl_db2smadd -A -n $PRIMARY_NODE > /dev/null 2>&1
    return $?
}


#----------------------------------------------------------------------------
# Main:
#----------------------------------------------------------------------------

# Read in the message catalog entries
. /usr/es/sbin/cluster/sa/db2/cat/cl_db2preimport

# Read in the init functions, abort, require, errmsg, etc.
. /usr/es/lib/ksh93/common_functions.ksh93

# Read in the DB2 definitions
. /usr/es/sbin/cluster/sa/db2/etc/db2_definitions

# Files created from here on are readable/writable by the owner only
umask -S u=rw,g=,o=

# Read in the DB2 Discovery Information (presumably defines DB2_INSTANCES
# and the per-instance values read via KLIB_DB2_disc_get_instance_value)
[[ -f /usr/es/sbin/cluster/sa/db2/etc/db2.disc ]] && 
	. /usr/es/sbin/cluster/sa/db2/etc/db2.disc

# Globals populated from the command line, or from the XML configuration
# file when running in manual (-f) mode
typeset APPLICATION_NAME
typeset COMPONENT_ID
typeset PRIMARY_NODE
typeset TAKEOVER_NODES
typeset INSTANCE
typeset DATABASE_NAME
typeset SERVICE_LABEL
typeset PREFIX
typeset FLAGS
# XML schema used by clsaxmlutil to validate the -f configuration file
typeset DB2_MANUAL_CONFIG_SCHEMA="/usr/es/sbin/cluster/sa/db2/config/db2_config.xsd"


# By default don't perform takeover discovery, meaning don't discover
# the instances accessible on the takeover node.  This useful for adding
# a single instance when the takeover node was just added to the cluster
# configuration.
#
while getopts C:A:TMFSo:n:i:d:l:f:p:N: option
do
	case $option in
		M) 	# Delete the pre-existing configuration (modify mode)
			FLAGS="-M"
			;;
		A)
			# Application (server) name for the resource group
			APPLICATION_NAME=$OPTARG
			;;
		T)
	    		# Perform discovery on the takeover node (for Add an instance)
	    		TAKEOVER_DISCOVERY=true
			;;
		C)
			# Smart Assist component identifier (e.g. DB2_NON_DPF_SINGLE)
			COMPONENT_ID=$OPTARG
			;;
    		o)
			# Primary (owning) node for the instance
            		PRIMARY_NODE=$OPTARG
			;;
        	n)
			# Takeover nodes; accepted space or colon separated.
			# Keep a colon-tokenized copy so the list can be
			# passed downstream as a single word.
			TAKEOVER_NODES=$OPTARG
			TAKEOVER_NODES_TOKENIZED=$(echo $TAKEOVER_NODES | sed -e "s/ /\:/g")
            		TAKEOVER_NODES=$(echo $TAKEOVER_NODES | sed -e "s/\:/ /g")
            		;;
        	i)
			# DB2 instance name to make highly available
            		INSTANCE=$OPTARG
			;;
        	d)
			# Database(s) to monitor within the instance
            		DATABASE_NAME=$OPTARG
			;;
        	l)
			# Service IP label for the resource group
            		SERVICE_LABEL=$OPTARG
			;;
        	p)
			# Prefix / netmask for the service IP label
            		PREFIX=$OPTARG
			;;
		F)
			# First resource group in mutual configuration
	    		FLAGS="$FLAGS -F"
			;;
		S)
			# Second resource group in mutual configuration
			FLAGS="$FLAGS -S"
			;;
                f) 
                        # Manual Configuration Mode
                        MANUAL_CONFIG=true
                        CONFIG_FILE=$OPTARG
                        ;;
		N)
			#
			: User Provided network
			#
			NETWORK=$OPTARG
			;;
    	esac
done

#
# Before handling anything else, check if we have to configure from XML
# (manual configuration mode, -f <config file>)
#
if [[ $MANUAL_CONFIG == true ]]; then
    if [[ ! -f $CONFIG_FILE ]]; then
        dspmsg -s 51 cluster.cat 26 "Unable to read the configuration file. Please ensure the correct path"
        # BUGFIX: "return" at top level is only exit-like by accident in
        # ksh; use an explicit exit.
        exit 1
    fi
    importConfigFromFile
    ret=$?
    # Re-read the discovery file only when the import succeeded.
    # BUGFIX: the original tested "[[ ret && ... ]]", which evaluates
    # the literal string "ret" (always true) instead of the captured
    # exit status.
    (( ret == 0 )) && [[ -f /usr/es/sbin/cluster/sa/db2/etc/db2.disc ]] && {
        . /usr/es/sbin/cluster/sa/db2/etc/db2.disc
    }
fi

if [[ -z $APPLICATION_NAME ]]; then
	# BUGFIX: the original echoed the (empty) value of the variable;
	# name the variable in the diagnostic instead.
	echo "APPLICATION_NAME is empty"
	exit 1
fi


PARTICIPATING_NODES="$PRIMARY_NODE $TAKEOVER_NODES"

# Alert the user we're performing pre-verification of the DB2 instance
# (PARTICIPATING_NODES is passed by name, !var, per the errmsg convention)
errmsg 13 $INSTANCE !PARTICIPATING_NODES

# DB2 Smart Assist supports exactly one service IP label
if (( $(echo $SERVICE_LABEL | tr ' ' '\n' | wc -l) > 1 ))
then
	echo "Multiple serviceIP's are detected. DB2 smart assist doesn't support\nmultiple serviceIP's.\nGo to Change/Show RG properties menu and provide single seriveIP only\n"
	exit 1
fi

# Is the service IP label resolveable on the local node?
check_service_ip $SERVICE_LABEL ||
	abort 4 $SERVICE_LABEL

# Perform discovery on the takeover nodes (-T flag)
$TAKEOVER_DISCOVERY && {
	echo
	# Disable summary reporting, but enable verbose logging
	/usr/es/sbin/cluster/sa/db2/sbin/cl_db2smadd -v \
		-N -n $TAKEOVER_NODES_TOKENIZED || {
		# BUGFIX: capture the discovery status before echo clobbers
		# $?; the original ran "echo $?; exit $?", which always
		# exited with echo's status (0) and masked the failure.
		rc=$?
		echo $rc
		exit $rc
	}

	# Re-read the DB2 discovery file after performing discovery
	# on the takeover nodes
	[[ -f /usr/es/sbin/cluster/sa/db2/etc/db2.disc ]] && 
		. /usr/es/sbin/cluster/sa/db2/etc/db2.disc
}

# Perform network discovery after added the takeover
# nodes, determine which is the best network to use

# Is the service IP label already defined to HACMP?
if [[ -z $NETWORK ]]; then
	NETWORK=$(KLIB_HACMP_get_interface_network $SERVICE_LABEL)
fi

# If this service IP label isn't already defined, find a suitable network
# to create the service IP label on, then pass this network to 
# cl_db2import which will create the service IP
if [[ -z $NETWORK ]]; then

	# Find a useable network for this service IP label, this
	# is the same method used by the 2 node configuration assistant
	NETWORK=$(KLIB_HACMP_get_net_with_most_interfaces $SERVICE_LABEL)
	# NOTE(review): "$?" below refers to the exit status of the command
	# substitution assignment above ([[ ]] does not set $? until it
	# completes), but this is fragile -- the -z test already covers the
	# failure case in practice; confirm before relying on it.
	[[ -z $NETWORK || $? != 0 ]] && abort 1 "$SERVICE_LABEL"
else
	# Make sure the service IP label is not an IPAT service
	nettype=$(KLIB_HACMP_get_network_type $NETWORK)
	if [[ "$nettype" != "alias" ]]; then
		abort 11 "$SERVICE_LABEL" "$nettype"
	fi
fi
# Alert the user to the network being used.
errmsg 2 "$NETWORK" "$SERVICE_LABEL"

# Perform instance validation using the user selected nodes
# (PARTICIPATING_NODES is passed by NAME; instance_validation binds it
# through a ksh93 nameref)
instance_validation "$INSTANCE" "$DATABASE_NAME" PARTICIPATING_NODES
errmsg 14

# Build the optional prefix/netmask argument for cl_db2import
if [[ -z $PREFIX  ]]
then

        SNTPREFIX=""
else
        SNTPREFIX=" -p $PREFIX "
fi


# Hand off to cl_db2import to create the service IP, resource group and
# the rest of the HACMP configuration for this instance
/usr/es/sbin/cluster/sa/db2/sbin/cl_db2import -a -v     -i $INSTANCE \
                                                        -A "$APPLICATION_NAME" \
                                                        -d $DATABASE_NAME \
                                                        -w $NETWORK \
                                                        -l $SERVICE_LABEL \
                                                        $SNTPREFIX \
                                                        -o $PRIMARY_NODE \
                                                        -n $TAKEOVER_NODES_TOKENIZED \
                                                        -C "$COMPONENT_ID" \
                                                        $FLAGS