#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG                                                    
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2020,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                      
#                                                                              
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/sa/tds/sbin/clhaws_functions.sh 1.2 
#  
# Licensed Materials - Property of IBM 
#  
# Restricted Materials of IBM 
#  
# COPYRIGHT International Business Machines Corp. 2009,2010 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/sa/tds/sbin/clhaws_functions.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM
#
###############################################################################
#
# This script contains all the definitions, macros, functions, etc that
# are needed by the HAWS package.
#
###############################################################################
#
#  SPECIAL NOTICES
#
#       Please use this script with care.  IBM will not be
#       responsible for damages of any kind resulting from its use.
#       The use of this script is the sole responsibility of
#       the customer and depends on the customer's ability to eval-
#       uate and determine what implications that may be involved.
#
#
#
#
###############################################################################
#


#
# Included by most other utilities to setup various constants, etc.

# Logging severity tokens.
# NOTE(review): logmsg() compares against the literal words "HAWS_TRACE",
# "HAWS_DEBUG", etc., so these lowercase values appear unused within this
# file -- presumably kept for other consumers; confirm before removing.
HAWS_DEBUG="debug"
HAWS_INFO="info"
HAWS_ERROR="error"
HAWS_WARN="warn"
HAWS_TRACE="trace"
HAWS_LOGONLY="logonly"

# Minimum free space (in KB) required to run clhaws_import.
HAWS_MIN_FREE_SPACE=5120

# Exit codes
HAWS_EXIT_FAIL=2
HAWS_EXIT_SUCCESS=0

# PowerHA (HACMP) installation root and the cluster utilities we invoke.
HACMP_HOME=/usr/es/sbin/cluster
CLADDSERV=$HACMP_HOME/utilities/claddserv
CLRMSERV=$HACMP_HOME/utilities/clrmserv
CLADDAPPMON=$HACMP_HOME/utilities/claddappmon
CLRMAPPMON=$HACMP_HOME/utilities/clrmappmon
CLADDGRP=$HACMP_HOME/utilities/claddgrp
CLRMGRP=$HACMP_HOME/utilities/clrmgrp
CLADDRES=$HACMP_HOME/utilities/claddres
CLRMRES=$HACMP_HOME/utilities/clrmres
CLLSGRP=$HACMP_HOME/utilities/cllsgrp
CLLSSERV=$HACMP_HOME/utilities/cllsserv
CLLSAPPMON=$HACMP_HOME/utilities/cllsappmon
CLLSRES=$HACMP_HOME/utilities/cllsres
GETLOCALNODENAME=$HACMP_HOME/utilities/get_local_nodename


# Read the saved TDS configuration type (e.g. DISTRIBUTED, PEERTOPEER).
# BUGFIX: guard the read so a missing file no longer emits a raw 'cat'
# error every time this library is sourced; the value stays empty instead,
# which is the same result the unguarded read produced.
HAWS_HOME="/usr/es/sbin/cluster/sa/tds"
TMPFILE="$HAWS_HOME/tds_config_type"
TDS_CONFIG_TYPE=""
if [[ -r "$TMPFILE" ]]; then
    TDS_CONFIG_TYPE=$(cat "$TMPFILE")
fi
# NOTE(review): "TDS_INSTAL_DIR" (single L) is the spelling used by
# get_instance_list()/get_instance_home(); do not rename without
# updating all readers.
TDS_INSTAL_DIR="/opt/IBM/ldap/"
TDS_PACKAGE_NAME="idsldap"

# Save the name of this program. It's used extensively during logging
progname=$0

###############################################################################
# Function: setup_defaults
###############################################################################
#
# Setup our local variables to contain default values. This function is
# normally called just prior to parsing the command line options.
###############################################################################
setup_defaults() {
    # Reset every option flag to its "off" state.  Normally called once,
    # just before the command line is parsed, so stale values cannot
    # leak in from the environment.

    VALIDATE_FLAG="0"   # compare the HACMP configuration with what we expect
    TRACE_FLAG="0"      # show HAWS_TRACE messages describing script progress
    DEBUG_FLAG="0"      # show HAWS_DEBUG messages (output can be extensive)
    MSGDEBUG_FLAG="0"   # show diagnostics about message-catalog processing
    NOACTION_FLAG="0"   # dry run: go through the motions, import nothing
}


###############################################################################
# Function: generic_init
###############################################################################
#
# This is a generic initialization routine that is used by all other
# components in the system. It takes care of establishing the default
# option values and loading the message catalog, and is normally called
# once before parsing the command line options.
###############################################################################
generic_init() {
    # Shared start-up path for every HAWS utility: establish the default
    # option flags, pull in the generated message constants, and verify
    # there is enough free space to proceed.

    setup_defaults

    # The message-constant script is generated by the Makefile straight
    # from the source message files, so it always carries the latest
    # message ids.  Without it we cannot log anything meaningful.
    messages="$HAWS_HOME/sbin/tdshaws_msg.sh"
    if [[ ! -f "$messages" ]]; then
        echo "The file '$messages' is missing! Unable to continue. Bye"
        exit 1
    fi
    . "$messages"

    check_fs_space
}
# end of generic_init()

###############################################################################
# Function: process_arguments
###############################################################################
#
#   -s Defines what subsystem is being imported. Valid values are
#      'was', 'dm', 'db2' or 'tds'
#
#   -v Sets the validate flag to ON. When set, we'll validate HACMP
#      configuration.
#
#   -d Sets the debug flag to ON. This will print all message.
#
#   -n Set the noaction flag. When set, no actions will be performed, but
#      everything else will still happen. This is useful for showing someone
#      what this script will do without actually doing anything.
#
#   -m Debug the message processing stuff.
#
#   -t Turn tracing on
#
#   -? usage
#
###############################################################################
generic_process_arguments() {
    # Shared command-line parser for the HAWS utilities.  Each recognized
    # flag sets the corresponding *_FLAG global; on return MYOPTIND holds
    # the index of the first non-option argument for the caller to use.
    #
    # NOTE(review): the option string declares s:, I:, w:, p: as taking
    # arguments, but only -A has a (no-op) case arm here -- presumably the
    # callers consume those OPTARGs themselves; confirm.
    # NOTE(review): the leading ':' makes getopts report a missing option
    # argument as ':', which no case arm handles, so it is silently ignored.
    logmsg HAWS_TRACE "$MSG_PROCARG" "Processing arguments...\n";
    while getopts :mvdns:tI:w:p:A: c 
      do 
        case $c in
            n) NOACTION_FLAG="1"
                logmsg HAWS_TRACE "$MSG_NOACTION_SET" "NOACTION_FLAG is set\n"
                ;;
            v) VALIDATE_FLAG="1"
                logmsg HAWS_TRACE "$MSG_VALIDATE_SET" "VALIDATE_FLAG is set\n"
                ;;
            t) TRACE_FLAG="1"
                logmsg HAWS_TRACE "$MSG_TRACE_SET" "TRACE_FLAG is set\n"
                ;;
            d) DEBUG_FLAG="1"
                # NOTE(review): VERBOSE_FLAG is set here but not reset by
                # setup_defaults(); confirm who reads it.
                VERBOSE_FLAG="1"
                logmsg HAWS_TRACE "$MSG_DEBUG_SET" "DEBUG_FLAG is set\n"
                ;;
            m) MSG_DEBUG_FLAG="1"
                MSGDEBUG_FLAG="1"
                logmsg HAWS_TRACE "$MSG_MSGDEBUG_SET" "MSGDEBUG_FLAG is set\n"
                ;;
			A)
				;;
            \?)  
                logmsg HAWS_ERROR "$MSG_UNKNOWN_OPTION" "Unrecognized command line option specified\n"
		usage
		exit $HAWS_EXIT_FAIL;;   
         esac 
      done 
      # Export the resume index so callers can pick up positional args.
      MYOPTIND=$OPTIND
      logmsg HAWS_TRACE "$MSG_PROCDONE" "Done processing arguments...\n";
}


###############################################################################
# Function: logmsg
###############################################################################
# This function is used for displaying and logging all messages. It's
# fully internationalized so that the approrpiate catalog will translate
# the messages into whatever language is required
#
# This function is called with the following arguments
#
# $1 = Message type. This will be one of: HAWS_ERROR, HAWS_DEBUG,
# HAWS_INFO or HAWS_WARN
#
# $2 - This is a string that represents the message code. This string
# will be translated into the actual message number before passing
# it to the dspmsg utility
#
# $3 - The default string
#
# $4 - $9 Additional arguments. The number of additional arguments
# is based on the message definition
###############################################################################
logmsg() {
    # Display and/or log one message.
    #   $1 - severity keyword: the literal strings HAWS_ERROR, HAWS_WARN,
    #        HAWS_INFO, HAWS_DEBUG, HAWS_TRACE or HAWS_LOGONLY
    #   $2 - message id used for the dspmsg catalog lookup
    #   $3 - default format string (used if the catalog entry is missing)
    #   $4..$9 - optional arguments consumed by the format string
    showit="0"

    # Establish the logfile location. Make sure the logs directory is
    # present.  The log name encodes month and year, e.g. "Feb-2021.log".
    LOGFILE=`date +"%h-20%y.log"`
    if [[ ! -d $HAWS_HOME/logs ]]; then
	mkdir -p $HAWS_HOME/logs
    fi
    # Depending on what type of message this is, we need to determine
    # if the message needs to be displayed: TRACE shows when tracing or
    # debugging is on, DEBUG only when debugging, and INFO/ERROR/WARN
    # are always shown.
    if [[ "$1" = "HAWS_TRACE" ]]; then
	if [[ "$TRACE_FLAG" = "1" ]]; then
	    showit="1"
	fi
	if [[ "$DEBUG_FLAG" = "1" ]]; then
	    showit="1"
	fi
    fi
    if [[ "$1" = "HAWS_DEBUG" ]]; then
	if [[ "$DEBUG_FLAG" = "1" ]]; then
	    showit="1"
	fi
    fi
    if [[ "$1" = "HAWS_INFO" ]]; then
	showit="1"
    fi
    if [[ "$1" = "HAWS_ERROR" ]]; then
	showit="1"
    fi
    if [[ "$1" = "HAWS_WARN" ]]; then
	showit="1"
    fi
    if [[ "$1" = "HAWS_LOGONLY" ]]; then
	# LOGONLY messages go straight to the log file, never the screen.
	# ('\c' is the ksh/AIX echo escape that suppresses the newline.)
	echo "`date` - \c" >> $HAWS_HOME/logs/$LOGFILE
	dspmsg -s 1 haws.cat $2 "$3" "$4" "$5" "$6" "$7" "$8" "$9" >> $HAWS_HOME/logs/$LOGFILE
	return;
    fi
    if [[ "$showit" = "0" ]]; then
	return
    fi

    # If we have the msgdebug enabled, display what we're doing to 
    # to work with
    if [[ "$MSGDEBUG_FLAG" = "1" ]]; then
	echo "-------------- logmsg called ------------------" | tee -a $HAWS_HOME/logs/$LOGFILE
	echo "\$2 = "$2 | tee -a $HAWS_HOME/logs/$LOGFILE
	echo "\$3 = "$3 | tee -a $HAWS_HOME/logs/$LOGFILE
    fi

    # We have to make sure a message number was provided. It's very
    # common to think you have a valid message id when you really don't.
    # A missing definition will appear as an empty string definition.
    # fix-it-up here. The fix-up is done by setting the id to the
    # well-known message id "MSG_MISSING_MSG_DEFINITION". 
    id=$2
    if [[ "$2" = "" ]]; then
	id=$MSG_MISSING_MSG_DEFINITION
	if [[ $MSGDEBUG_FLAG = "1" ]]; then
	    echo "----> Missing Definition" | tee -a $HAWS_HOME/logs/$LOGFILE
	fi
    fi
    echo "`date` - \c" | tee -a $HAWS_HOME/logs/$LOGFILE
    # NOTE(review): the '-eq'/'-ne' comparisons below assume the message
    # ids are integers; a non-numeric id would trip them -- confirm.
    if [[ $id -eq $MSG_DEBUG_MSG ]]; then
	# Plain debug text bypasses the catalog entirely.
	echo "$3 $4 $5 $6 $7 $8 $9" | tee -a $HAWS_HOME/logs/$LOGFILE
    else
	# Look the message up in set 1 of haws.cat; dspmsg falls back to
	# the supplied default string when the catalog entry is absent.
	dspmsg -s 1 haws.cat $id "$3" "$4" "$5" "$6" "$7" "$8" "$9" | tee -a $HAWS_HOME/logs/$LOGFILE
	if [[ $MSGDEBUG_FLAG = "1"  && $2 != "" && $id -ne $MSG_DEBUG_MSG ]] ; then
	    echo "`date` - \c" | tee -a $HAWS_HOME/logs/$LOGFILE
	    # Message 9999 -- presumably a msg-debug diagnostic entry in
	    # haws.cat; confirm against the catalog source.
	    dspmsg -s 1 haws.cat 9999 "$3" "$4" "$5" "$6" "$7" "$8" "$9" | tee -a $HAWS_HOME/logs/$LOGFILE
	fi
    fi
}

###############################################################################
# Function: findvg
###############################################################################
#
# Find the volume group given a directory. This is done as follows:
#
# 1. Look at all the mounted filesystems and locate the filesystem that
# contains the specified directory. Extract the logical volume.
#
# 2. Drill into the logical volume and extract the volume group. 
#
###############################################################################

findvg() {
    # Resolve the volume group that holds the filesystem containing the
    # directory $1.  On success the result is left in the global VG; on
    # failure VG stays empty and an error is logged.

    VG=""
    logmsg HAWS_TRACE "$MSG_FIND_VG" "Find vg for %s\n" $1

    # Ask df which device backs the directory, then let lsfs map that
    # entry (stripping any "host:" prefix first) to the logical volume.
    mounted_dev=$(df "$1" 2>/dev/null | tail -1 | awk '{print $1}')
    case $mounted_dev in
        *:*) mounted_dev=$(echo $mounted_dev | awk -F: '{print $2}') ;;
    esac
    lv=$(lsfs $mounted_dev | tail -1 | awk '{print $1}')

    if [[ ! $lv = "" ]] ; then
        logmsg HAWS_TRACE "$MSG_FOUND_LV" "Found logical volume: %s\n" $lv

        # lslv prints "... GROUP: <vgname>" -- pull the volume group out.
        lvbase=$(basename $lv)
        VG=$(lslv $lvbase | awk '{ if (match($5, "GROUP:")) print $6 }')
        logmsg HAWS_TRACE "$MSG_FOUND_VG" "Found volume group: %s\n" $VG
    fi

    if [[ $VG = "" ]] ; then
        logmsg HAWS_ERROR "$MSG_VG_NOT_FOUND" "Volume group for directory %s not found.  \nPlease varyon the necessary volume group and mount the file systems.\n" $1
    fi
}


###############################################################################
# Function: parse_websphere_config
###############################################################################
#
# Parse the websphere configuration files and determine how it's
# configured. WAS_NODE and WAS_CELL variables are set.  This information 
# will be used to create the hacmp scripts.
###############################################################################
parse_websphere_config() {
    # Discover the WebSphere cell (WAS_CELL) and node (WAS_NODE) for the
    # running profile by walking the profile's configuration directories.
    # The results are used later when generating the HACMP scripts.
    #
    # NOTE(review): every fatal 'exit' below is commented out, so this
    # function only logs errors and keeps going -- confirm that is intended.
    logmsg HAWS_DEBUG  "$MSG_DEBUG_MSG" "Parsing websphere config files...\n"

    # Step one of the process is to determine how many cells are defined.
    # If there is more than one cell defined, then bail unless the user
    # has specified which cell to use in the configuration process

    # Locate the running profile directory from the process table.
    # NOTE(review): TDS_INSTALL_DIR is only assigned inside
    # get_instance_list()/get_instance_home(); if neither has run yet the
    # grep pattern degenerates to "/appsrv/profiles" -- confirm call order.
    WAS_PROFILE_DIR=`ps -ef | grep $TDS_INSTALL_DIR/appsrv/profiles | grep -v grep | awk '{ b=split( $0, a, " "); print a[b-3]}'`
    # Each subdirectory of .../cells is one cell; count them and remember
    # the last one seen.
    files=`ls -dl $WAS_PROFILE_DIR/cells/* | awk '{ if ( $1 ~ /^d/ ) print $9}'`
    count=0
    for file in $files; do
	lastcell=`basename $file`
	count=`expr $count + 1`
    done
    if [[ $count -eq 0 ]]; then
	logmsg HAWS_ERROR "$MSG_WAS_NO_CELLS" "No WebSphere cells defined.\n"
#	exit $HAWS_EXIT_FAIL
    fi
    if [[ $count -gt 1 && "$WAS_CELL" = "" ]]; then
	logmsg HAWS_ERROR "$MSG_WAS_SPECIFY_CELL" "There are multiple WebSphere cells defined.  Please specify one using the -c option.\n"
#	exit $HAWS_EXIT_FAIL
    fi
    # Exactly one cell and none specified: adopt it.
    if [[ $count -eq 1 && "$WAS_CELL" = "" ]]; then
	WAS_CELL=$lastcell
    fi

    # Make sure the user specified an existing cell
    found=0
    for file in $files; do
	cell=`basename $file`
	if [[ "$cell" = "$WAS_CELL" ]]; then
	    found=1
	fi
    done
    if [[ $found -eq 0 ]]; then
	logmsg HAWS_ERROR "$MSG_WAS_UNKNOWN_CELL" "You specified an invalid cell name: %s\n" $WAS_CELL
#	exit $HAWS_EXIT_FAIL
    fi
    logmsg HAWS_DEBUG  "$MSG_DEBUG_MSG" "WebSphere cell $WAS_CELL validated\n"

    # Now that we have the cell name, we need to retrieve the 
    # nodes that comprise this cell. This is fairly easy to do. Just
    # go down the directory hierarchy and look in the 'nodes' subdirectory.
    # For each directory found, look one more level for a node.xml file.
    # If found, then this is a valid node.
    #
    # Try to find the node that matches the name of the current node.
    files=`ls -dl $WAS_PROFILE_DIR/cells/$WAS_CELL/nodes/* | awk '{ if ( $1 ~ /^d/ ) print $9}'`
    count=0
    bfile=""
    for file in $files; do
	bfile=`basename $file`
	if [[ "$WAS_NODE" = "" || "$bfile" = "$WAS_NODE" ]]; then
	    count=`expr $count + 1`
	fi
    done

    # We should have a single node. If we have more than 1, then the
    # user will need to specify the node-name we're working with
    if [[ $count -eq 0 ]]; then
	if [[ "$WAS_NODE" = "" ]] ; then
	    logmsg HAWS_ERROR  "$MSG_WAS_NONODES" "Unable to find a WAS node\n"
	else
	    logmsg HAWS_ERROR  "$MSG_WAS_NODE_NOT_FOUND" "Unable to find WAS node: %s\n" $WAS_NODE
	fi
#	exit $HAWS_EXIT_FAIL
    fi
    if [[ $count -gt 1 ]]; then
	if [[ "$WAS_NODE" = "" ]] ; then
	    logmsg HAWS_ERROR  "$MSG_WAS_TOO_MANY_NODES" "Too many WAS nodes defined.  Please specify one using the -o option \n"
	else
	    logmsg HAWS_ERROR  "$MSG_WAS_TOO_MANY_NODES_MATCHED" "Too many WAS nodes defined that match: %s\n" $WAS_NODE
	fi
#	exit $HAWS_EXIT_FAIL
    fi

    # bfile still holds the last matching directory name; adopt it when
    # the user did not name a node explicitly.
    if [[ "$WAS_NODE" = "" ]] ; then
	WAS_NODE="$bfile"
    fi

    # We now have the node
    logmsg HAWS_TRACE  "$MSG_WAS_NODEFOUND" "Found the WAS node: %s\n" $WAS_NODE
    
}


###############################################################################
# Function: get_server_names
###############################################################################
#
# Determine the names of the application server. This is needed so when we 
# query the status of the server, we can specify the name of the 
# application server to monitor
###############################################################################
get_server_names() {
    # Determine the application server name for the current cell/node so
    # monitoring can target it.  The serverindex.xml file is parsed by the
    # GetServerNames Java helper; the result lands in WAS_SERVER_NAME.
    logmsg HAWS_TRACE $MSG_WAS_GET_SERVERNAME "Getting the WAS server name...\n"

    xmlfile="$WAS_PROFILE_DIR/cells/$WAS_CELL/nodes/$WAS_NODE/serverindex.xml"
    logmsg HAWS_DEBUG $MSG_PARSE_XMLFILE "Parsing the XML file: %s\n" $xmlfile

    # Ask the helper for servers of type APPLICATION_SERVER only.
    WAS_SERVER_NAME=$($JAVA $JAVACP $GETSERVERNAME $xmlfile APPLICATION_SERVER)
    logmsg HAWS_TRACE $MSG_WAS_FOUND_SNAME "Found the server name as %s\n" $WAS_SERVER_NAME
}


###############################################################################
# Function: create_script_names
###############################################################################
#
# Create the resource group, application server and script names.  Also, 
# create the directories locally and remotely. Also sets the INVOCATION
# type value (mon_mode)
###############################################################################
create_script_names() {
    # Build the resource group, application server and generated-script
    # names for the given subsystem, then prepare the local and remote
    # directories that will hold the generated scripts.
    #
    #   $1 - subsystem prefix (e.g. "tds")
    #   $2 - base name used in the generated names
    #   $3 - fallover node to prepare via cl_rsh
    #   $4 - TDS instance name (folded in unless DISTRIBUTED/PEERTOPEER)
    #
    # Globals set: mon_mode, rgname, asname, create_script, delete_script,
    # cfg_script, app_monitor_script.
    subsystem=$1
    name=$2
    fallover_node=$3
    tds_instance_name=$4

    # Monitor invocation type used by the application monitor.
    mon_mode="longrunning"

    # For stand-alone configurations the instance name is part of the
    # resource group name.
    if [[ "$TDS_CONFIG_TYPE" != "DISTRIBUTED"  &&  "$TDS_CONFIG_TYPE" != "PEERTOPEER" ]]; then
        rgname=${subsystem}rg_${name}_${tds_instance_name}
    else
        rgname=${subsystem}rg_${name}
    fi

    # NOTE(review): the original comment said "limit to 64 characters" yet
    # the truncation width here is 32 (asname below uses 64); behavior is
    # preserved as-is -- confirm which width is intended.
    if [[ ${#rgname} -gt 64 ]] ; then
        typeset -L32 temp1=$rgname
        rgname=$(echo $temp1)
    fi
    logmsg HAWS_DEBUG  "$MSG_DEBUG_MSG" "RG Name = $rgname\n"

    # Application Server name - limit to 64 characters
    if [[ "$TDS_CONFIG_TYPE" != "DISTRIBUTED"  &&  "$TDS_CONFIG_TYPE" != "PEERTOPEER" ]]; then
        asname=${subsystem}as_${name}_${tds_instance_name}
    else
        asname=${subsystem}as_${name}
    fi

    if [[ ${#asname} -gt 64 ]] ; then
        typeset -L64 temp2=$asname
        asname=$(echo $temp2)
    fi
    logmsg HAWS_DEBUG  "$MSG_DEBUG_MSG" "APP SERVER Name = $asname\n"

    # Generated-script locations; instance-qualified for stand-alone
    # configurations so multiple instances do not collide.
    if [[ "$TDS_CONFIG_TYPE" != "DISTRIBUTED"  &&  "$TDS_CONFIG_TYPE" != "PEERTOPEER" ]]; then
        create_script="$HAWS_HOME/scripts/${subsystem}_${tds_instance_name}_import_create"
        delete_script="$HAWS_HOME/scripts/${subsystem}_${tds_instance_name}_import_delete"
        cfg_script="$HAWS_HOME/config/HAWS_CFG_${subsystem}.cfg"
        app_monitor_script="$HAWS_HOME/config/HAWS_MON_${subsystem}_${tds_instance_name}_${name}.sh"
    else
        create_script="$HAWS_HOME/scripts/${subsystem}_import_create"
        delete_script="$HAWS_HOME/scripts/${subsystem}_import_delete"
        cfg_script="$HAWS_HOME/config/HAWS_CFG_${subsystem}.cfg"
        app_monitor_script="$HAWS_HOME/config/HAWS_MON_${subsystem}_${name}.sh"
    fi

    # If Validating, there is no need to do anything else.
    if [[ $VALIDATE_FLAG -eq 1 ]] ; then
        return
    fi

    # Warn (do not fail) if the names collide with existing cluster objects.
    if [[ -n $($CLLSGRP 2>/dev/null | grep -w $rgname) ]] ; then
        logmsg HAWS_WARN $MSG_RGNAME_EXISTS "The Resource Group name %s already exists in the cluster.\n" $rgname
    fi

    if [[ -n $($CLLSSERV -c 2>/dev/null | cut -d: -f1 | grep -w $asname) ]] ; then
        logmsg HAWS_WARN $MSG_ASNAME_EXISTS "The Application Server name %s already exists in the cluster.\n" $asname
    fi

    # Create the local scripts/config directories.
    # BUGFIX: the original tested '! -f' and '! -x', which are the wrong
    # tests for "directory exists"; use '! -d' (mkdir -p keeps this safe
    # even when the directory is already present).
    if [[ ! -d $HAWS_HOME/scripts ]]; then
        mkdir -p $HAWS_HOME/scripts
    fi

    if [[ ! -d $HAWS_HOME/config ]]; then
        mkdir -p $HAWS_HOME/config
    fi

    # Create the config and scripts directories on the fallover node
    # (stand-alone configurations only).
    if [[ "$TDS_CONFIG_TYPE" != "DISTRIBUTED"  &&  "$TDS_CONFIG_TYPE" != "PEERTOPEER" ]]; then
        for rdir in config scripts; do
            cl_rsh $fallover_node "mkdir -p $HAWS_HOME/$rdir" > /dev/null 2>&1
            if [[ $? -eq 0 ]] ; then
                logmsg HAWS_DEBUG "$MSG_DEBUG_MSG" "mkdir $HAWS_HOME/$rdir on node $fallover_node"
            else
                logmsg HAWS_DEBUG "$MSG_DEBUG_MSG" "mkdir did not work"
                logmsg HAWS_WARN "$MSG_MKDIR_ERROR" "The directory %s could not be created on node %s.\n" "$HAWS_HOME/$rdir" "$fallover_node"
            fi
        done
    fi

    # Preserve any previously generated scripts as "<name>.prev".
    for prev in "$create_script" "$delete_script" "$cfg_script" "$app_monitor_script"; do
        if [[ -f $prev ]]; then
            logmsg HAWS_DEBUG  "$MSG_DEBUG_MSG" "Copying previous script $prev\n"
            cp -fp $prev ${prev}.prev > /dev/null 2>&1
        fi
    done
}


###############################################################################
# Function: check_fs_space
###############################################################################
#
# Check the free space on the local node and exit if not enough.
###############################################################################
check_fs_space() {
    # Abort unless the filesystem holding $HAWS_HOME has at least
    # $HAWS_MIN_FREE_SPACE KB free.  The free figure is taken from the
    # third column of 'df -k' -- presumably the AIX "Free" column; the
    # variable name reflects KB, matching HAWS_MIN_FREE_SPACE's unit.
    logmsg HAWS_DEBUG "$MSG_DEBUG_MSG" "Checking for free space on $HAWS_HOME "

    typeset -i free_kb=0
    free_kb=$(df -k $HAWS_HOME | grep -v '^Filesystem' | awk '{print $3}')
    if [[ $free_kb -lt $HAWS_MIN_FREE_SPACE ]]; then
        logmsg HAWS_ERROR "$MSG_NOT_ENOUGH_FREE_SPACE" "There is not enough free space in %s.\n" $HAWS_HOME
        exit $HAWS_EXIT_FAIL
    fi
}


###############################################################################
# Function: init_java
###############################################################################
#
# Initialize Java
###############################################################################
init_java() {
    # Prepare the variables needed to invoke the bundled Java helpers.
    # The JVM shipped with WebSphere is used, with a classpath covering
    # the Xerces XML parser, the J2EE classes and our own sbin classes.
    JAVA="$WAS_INSTALL_DIR/java/bin/java"
    JAVACP="-classpath $WAS_INSTALL_DIR/lib/xerces.jar:$WAS_INSTALL_DIR/lib/j2ee.jar:$HAWS_HOME/sbin"

    # Names of the helper programs we may invoke.
    GETSERVERNAME=GetServerNames
}


###############################################################################
# Function: complete_scripts
###############################################################################
#
# After the generated scripts have been made, set permissions and copy the
# necessary ones to the fallover node.  Finally, tell the user what to do
# next.
###############################################################################
#######################################
# Copy one generated script to the fallover node, logging the outcome.
#   $1 - script path (the same path is used on both nodes)
#   $2 - fallover node name
#   $3 - short description used in the debug message
#######################################
haws_copy_to_fallover() {
    cl_rcp $1 $2:$1 > /dev/null 2>&1
    if [[ $? -eq 0 ]] ; then
        logmsg HAWS_DEBUG "$MSG_DEBUG_MSG" "Copied $3 script to node $2"
    else
        logmsg HAWS_DEBUG "$MSG_DEBUG_MSG" "Copy did not work"
        logmsg HAWS_WARN "$MSG_COPY_ERROR" "The file %s could not be copied to node %s.  \nPlease copy this file before continuing.\n" $1 $2
    fi
}

complete_scripts() {
    # Finalize the generated scripts: set execute permissions, copy the
    # ones the fallover node needs, and tell the user what to run next.
    #
    #   $1 - fallover node name
    # Relies on the script-path globals set by create_script_names().
    fallover_node=$1

    # Import-create script.  (Note: unlike the others it is not copied to
    # the fallover node -- presumably it only ever runs locally; confirm.)
    chmod u+x $create_script
    logmsg HAWS_TRACE  "$MSG_SCRIPT_NAME_IS" "Script name is %s\n" $create_script

    # Import-delete script.
    chmod u+x $delete_script
    logmsg HAWS_TRACE  "$MSG_SCRIPT_NAME_IS" "Script name is %s\n" $delete_script
    haws_copy_to_fallover $delete_script $fallover_node "delete"

    # Configuration script; keep it private (mode 700).
    chmod 700 $cfg_script
    logmsg HAWS_TRACE  "$MSG_SCRIPT_NAME_IS" "Script name is %s\n" $cfg_script
    haws_copy_to_fallover $cfg_script $fallover_node "configuration"

    # Application monitor script.
    chmod u+x $app_monitor_script
    logmsg HAWS_TRACE  "$MSG_SCRIPT_NAME_IS" "Script name is %s\n" $app_monitor_script
    haws_copy_to_fallover $app_monitor_script $fallover_node "application monitor"

    # Final information message
    logmsg HAWS_INFO "$MSG_CLHAWS_IMPORT_DONE" "The scripts to modify the PowerHA SystemMirror configuration have been created.  \n\
To import the configuration, run the script \n\
%s\n\
To remove the configuration, run the script \n\
%s\n\
After running either script do cluster verification and \n\
synchronization to complete the action.\n" $create_script $delete_script

}

###############################################################################
# Function: get_cluster_state
###############################################################################
# Set CLUSTER_STATE variable to 0 if the cluster is down, or up and in a stable
# state or to 1 otherwise.
###############################################################################
get_cluster_state() {
    # Set CLUSTER_STATE to 0 when the cluster manager reports a quiescent
    # state (stable, not configured, initializing) or reports nothing at
    # all; set it to 1 for any other (transitional) state.
    # Note: the cut output keeps a leading space, hence the patterns below.
    CLUSTER_STATE=0
    clstate=$(LC_ALL=C; lssrc -ls clstrmgrES | grep "Current state:" | cut -f2 -d":")
    case "$clstate" in
        ""|" ST_STABLE"|" NOT_CONFIGURED"|" ST_INIT")
            : ;;          # quiescent or unknown: leave CLUSTER_STATE at 0
        *)
            CLUSTER_STATE=1 ;;
    esac
}

###############################################################################
# Function: get_instance_list
###############################################################################
get_instance_list() {
    # Populate the instanceList[] / instanceHomeList[] arrays with the
    # name and home location of every TDS directory-server instance, as
    # reported by the idsilist utility.
    #
    # NOTE(review): TMP_INSTANCE_LIST_FILE is not set anywhere in this
    # file -- presumably the caller defines it; confirm before use.

    # Derive the TDS install directory from the installed fileset level,
    # e.g. /opt/IBM/ldap/V6.3 for a 6.3.x fileset.
    version=`lslpp -Lc | grep $TDS_PACKAGE_NAME 2>/dev/null | sed -e '/^#/d' | cut -f3 -d':' | cut -f 1,2 -d '.'`
    TDS_VERSION=`echo $version | awk '{print $1}'`
    TDS_INSTALL_DIR="$TDS_INSTAL_DIR""V""$TDS_VERSION"
	IDSILIST="$TDS_INSTALL_DIR""/sbin/idsilist"

    	# Dump the instance report to a scratch file, then parse its
    	# "Name:" and "Location:" lines; a Location line completes the
    	# entry begun by the preceding Name line and advances the index.
    	$IDSILIST -a -b $TMP_INSTANCE_LIST_FILE
		integer i
    	i=1
    	while read line
    	do
       		str=`echo $line | cut -d':' -f1`
       		if [[ "$str" = "Name" ]]; then
           		str1=`echo $line | cut -d':' -f2 | sed -e 's/ //g'`
           		instanceList[$i]="${str1}"
       		fi
       		if [[ "$str" = "Location" ]]; then
           		str2=`echo $line | cut -d':' -f2 | sed -e 's/ //g'`
           		instanceHomeList[$i]="${str2}"
           		let i=${i}+1
       		fi

    	done<"$TMP_INSTANCE_LIST_FILE"
	rm -f $TMP_INSTANCE_LIST_FILE
}

###############################################################################
# Function: get_instance_home
###############################################################################
get_instance_home() {
    # Look up the home location of a single TDS instance ($1); the result
    # is left in the global instanceHome.
    #
    # NOTE(review): largely duplicates get_instance_list() -- the two are
    # candidates for a shared helper.  TMP_INSTANCE_LIST_FILE is expected
    # to be defined by the caller; confirm.
	instance=$1
	# Derive the TDS install directory from the installed fileset level.
	version=`lslpp -Lc | grep $TDS_PACKAGE_NAME 2>/dev/null | sed -e '/^#/d' | cut -f3 -d':' | cut -f 1,2 -d '.'`
    TDS_VERSION=`echo $version | awk '{print $1}'`
    TDS_INSTALL_DIR="$TDS_INSTAL_DIR""V""$TDS_VERSION"
	IDSILIST="$TDS_INSTALL_DIR""/sbin/idsilist"

		# Restrict the idsilist report to the requested instance (-I),
		# then parse the "Location:" line for its home directory.
		$IDSILIST -a -b $TMP_INSTANCE_LIST_FILE -I $instance
		integer i
    	i=1
    	while read line
    	do
       		str=`echo $line | cut -d':' -f1`
       		if [[ "$str" = "Name" ]]; then
           		str1=`echo $line | cut -d':' -f2 | sed -e 's/ //g'`
#           		instance="${str1}"
       		fi
       		if [[ "$str" = "Location" ]]; then
           		str2=`echo $line | cut -d':' -f2 | sed -e 's/ //g'`
           		instanceHome="${str2}"
           		let i=${i}+1
       		fi

    	done<"$TMP_INSTANCE_LIST_FILE"
	rm -f $TMP_INSTANCE_LIST_FILE
}
