#!/usr/bin/ksh93
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# 61haes_r714 src/43haes/usr/sbin/cluster/sa/filenet/sbin/cl_filenet_db_ce_pe_preimport.sh 1.1
#
# Licensed Materials - Property of IBM
#
# COPYRIGHT International Business Machines Corp. 2010
# All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
# IBM_PROLOG_END_TAG
# @(#)43 1.1 src/43haes/usr/sbin/cluster/sa/filenet/sbin/cl_filenet_db_ce_pe_preimport.sh, hacmp, 61haes_r714 11/28/11 15:17:41
##
## NAME: cl_filenet_db_ce_pe_preimport
##
## DESCRIPTION:
##      This script performs prevalidation on the user-selected values
##      to ensure the selected instance, database, and other attributes
##      are appropriate for the environment. This script is always called
##      via SMIT, either from the Add an Instance panel or via the Mutual
##      Takeover assistant.
##
##      In addition, the service IP label network is selected and provided
##      as an argument to the cl_db2import script.
##
## ARGUMENTS:
##      [ -T ]                  - Perform takeover discovery
##      [ -l ServiceLabel ]
##      [ -d Database ]
##      [ -i InstanceName ]
##      [ -o PrimaryNode ]
##      [ -n TakeoverNodes ]
##      [ -M ]                  - Modify Mode
##      [ -A ApplicationName ]
##      [ -C ComponentId ]
##      [ -F | -S ]             - First/second resource group in mutual configuration
##      [ -P NetmaskPrefix ]    - Netmask (IPv4) / prefix length (IPv6)
##      [ -f ConfigFile ]       - Manual configuration mode
##      [ -c Component ]        - Component in manual configuration mode
##
##---------------------------------------------------------------------------

. /usr/es/lib/ksh93/func_include

version='1.1 $Source: 61haes_r711 43haes/usr/sbin/cluster/sa/filenet/sbin/cl_filenet_db_ce_pe_preimport.sh 2$'

#----------------------------------------------------------------------------
# Global Definitions
#----------------------------------------------------------------------------

# Set the FPATH for all DB2 / HACMP functions
FLIB=/usr/es/lib/ksh93
FPATH=$FLIB/utils:$FLIB/hacmp:$FLIB/db2:$FLIB/db2/vg:$FLIB/util:$FLIB/util/list:$FLIB/aix/:$FLIB/aix/odm/:$FLIB/sa
PATH=$PATH:/usr/es/sbin/cluster/sa/db2/sbin/:/usr/es/sbin/cluster/utilities/:/usr/es/sbin/cluster/:/usr/es/sbin/cluster/sa/sbin/

DB2SA_BIN_DIR=/usr/es/sbin/cluster/sa/db2/sbin
DB2_ETC_PATH=/usr/es/sbin/cluster/sa/db2/etc
TAKEOVER_DISCOVERY=false
FLAGS=""
MODIFY_MODE=false

#
# Required for KLIB_SA_logmsg
#
ERROR_FLAG="true"
KLIB_OUTPUT_CONSOLE="true"

#----------------------------------------------------------------------------
# Functions:
#       check_userexit
#       check_shared_vgs
#       check_vgs_unique
#       check_vgs_concurrent
#       check_inactive_instance
#       check_service_ip
#       instance_validation
#       importConfigFromFile
#----------------------------------------------------------------------------

#
# Function:     check_service_ip
#
# Description:  Determine if the service IP or any IP label is resolvable
#               on the local node (runs "host <IP label>")
#
# Arguments:    ip label - label to check
#
# Returns:      0 - host was resolved
#               1 - host was not resolved
#
function check_service_ip
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset service_ip=$1

    errmsg 7 $service_ip
    host $service_ip >/dev/null 2>&1 || {
        # Failed
        errmsg 200
        return 1
    }

    # Passed
    errmsg 100
}
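# Illustrative call only (hypothetical label "db2svc1"); the function relies
# on the AIX "host" command, so anything resolvable via DNS or /etc/hosts
# passes:
#
#     check_service_ip db2svc1 || echo "db2svc1 does not resolve locally"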
#----------------------------------------------------------------------------
#
# Function:     check_userexit
#
# Description:  If the user has enabled the userexit option for the
#               DB2 database and logretain is set to RECOVERY, then
#               the user must have provided a value for either the
#               archive logpath, the retrieve logpath or the audit
#               error logpath. If none of the environment variables
#               are set in the db2profile or userprofile files for
#               the instance, then log an error message and indicate
#               that these values must be provided.
#
# Arguments:    instance - scalar name of the DB2 UDB instance to verify
#
# Returns:      0 on success
#               1 on failure
#
function check_userexit
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset instance=$1
    if [[ -z $instance ]]; then
        errmsg 200
        return 1
    fi

    typeset databases=$(KLIB_DB2_disc_get_instance_value $instance "DATABASES")
    typeset userexit
    typeset logretain

    # For each database defined to this instance, validate that the instance
    # database, if configured with logretain=recovery and userexit=on, has one
    # of the paths set in the environment such that discovery can determine
    # the volume group associated with recovery.
    #
    for db in $databases; do
        logretain=$(KLIB_DB2_disc_get_instance_value $instance "DB_CFG_${db}_LOGRETAIN")
        userexit=$(KLIB_DB2_disc_get_instance_value $instance "DB_CFG_${db}_USEREXIT")

        if [[ "$logretain" == "RECOVERY" && "$userexit" == "ON" ]]; then
            # One of the variables ARCHIVE_PATH, RETRIEVE_PATH, or
            # AUDIT_ERROR_PATH must be set
            archive_path=$(KLIB_DB2_disc_get_instance_value $instance "DBM_ENV_ARCHIVE_PATH")
            retrieve_path=$(KLIB_DB2_disc_get_instance_value $instance "DBM_ENV_RETRIEVE_PATH")
            audit_error_path=$(KLIB_DB2_disc_get_instance_value $instance "DBM_ENV_AUDIT_ERROR_PATH")

            if [[ -z $archive_path && -z $retrieve_path && -z $audit_error_path ]]; then
                errmsg 200
                abort 16 $instance $db
            fi
        fi
    done

    errmsg 100
    return 0
}

#----------------------------------------------------------------------------
#
# Name:         check_shared_vgs
#
# Description:
#       Validate that the shared volume groups are accessible from all of the
#       takeover nodes, and that the shared VG is actually shared amongst all
#       of the nodes.
#
# Arguments:
#       instance name - scalar
#       nodes - by reference, list of the cluster nodenames to verify
#
# Returns:
#       0 - on success
#       1 - on failure
#
function check_shared_vgs
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset instance=$1
    typeset -n nodes=$2
    typeset pvids
    typeset homevg=$(KLIB_DB2_disc_get_instance_value $instance "DB2HOME_VG")
    typeset othervgs=$(KLIB_DB2_disc_get_instance_value $instance "OTHERVGS")
    typeset source_node=$(KLIB_DB2_disc_get_instance_value $instance "CLUSTER_NODE")
    typeset -A checked_vgs
    typeset vgs="$homevg $othervgs"

    # if the home volume group wasn't discovered, abort
    if [[ -z $homevg ]]; then
        abort 9 $instance
    fi

    #
    # In some configurations (example: SAP Smart Assist, which internally
    # uses DB2 Smart Assist) the instance home directory could be on rootvg
    # or a non-shared VG. In such a configuration a warning message is
    # issued so the user can make sure the instance home directory is
    # configured and available on the other nodes.
    #
    [[ "$homevg" == "rootvg" ]] && {
        errmsg 27 "$instance" "rootvg"
        typeset vgs="$othervgs"
    }

    # check each VG to ensure all nodes have access via PVID to the vg
    for vg in $vgs; do
        if [[ -n ${checked_vgs[$vg]} ]]; then
            continue
        fi
        checked_vgs[$vg]=:

        errmsg 8 $vg

        # Now check the other nodes specified as an argument to this
        # function; at least one node should be capable of sharing this vg.
        for node in $nodes; do
            if [[ "$node" != "$source_node" ]]; then
                KLIB_DB2_VG_node_pvid_compare $source_node $node $vg || {
                    if [[ "$vg" == "$homevg" ]]; then
                        errmsg 27 "$instance" "$homevg"
                    else
                        errmsg 200
                        abort 10 "$vg" "$source_node" "$node" "$instance"
                    fi
                }
            fi
        done

        errmsg 100
    done

    return 0
}
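# check_shared_vgs receives the node list by reference (ksh93 nameref), so
# callers pass the variable's name, not its value. Illustrative only, with
# hypothetical instance and node names:
#
#     NODES="nodeA nodeB"
#     check_shared_vgs db2inst1 NODES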
#----------------------------------------------------------------------------
#
# Name:         check_vgs_unique
#
# Description:  Validate that the DB2 instance volume groups are unique,
#               meaning no other DB2 instance uses those volume groups
#
# Arguments:    instance name - scalar (instance to validate)
#               nodes - by reference, list of the cluster nodenames to verify
#
# Returns:      0 - on success
#               1 - on failure
#
function check_vgs_unique
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset instance=$1
    typeset -n nodes=$2
    typeset source_node=$(KLIB_DB2_disc_get_instance_value $instance "CLUSTER_NODE")
    typeset inst

    errmsg 21 $instance
    if [[ -z $instance ]]; then
        errmsg 200
        return 1
    fi

    typeset OTHERVGS=$(KLIB_DB2_disc_get_instance_value $instance OTHERVGS)
    typeset HOMEVG=$(KLIB_DB2_disc_get_instance_value $instance DB2HOME_VG)
    typeset VGS="$OTHERVGS $HOMEVG"

    # If the home VG's PVIDs are not visible from a takeover node,
    # exclude the home VG from the uniqueness check
    for node in $nodes; do
        if [[ "$node" != "$source_node" ]]; then
            KLIB_DB2_VG_node_pvid_compare $source_node $node $HOMEVG || {
                typeset VGS="$OTHERVGS"
                break
            }
        fi
    done

    for inst in $DB2_INSTANCES; do
        # If this is not our instance, look to see if any of our VGs match
        shared_vgs=
        if [[ "$inst" != "$instance" ]]; then
            OTHERVGS=$(KLIB_DB2_disc_get_instance_value $inst OTHERVGS)
            HOMEVG=$(KLIB_DB2_disc_get_instance_value $inst DB2HOME_VG)
            for vgA in $VGS; do
                for vgB in $HOMEVG $OTHERVGS; do
                    if [[ "$vgA" == "$vgB" ]]; then
                        shared_vgs="$vgB $shared_vgs"
                    fi
                done
            done

            if [[ -n $shared_vgs ]]; then
                errmsg 200
                abort 20 $instance !shared_vgs $inst
            fi
        fi
    done

    errmsg 100
    return 0
}

#----------------------------------------------------------------------------
#
# Name:         check_vgs_concurrent
#
# Description:
#       Validate that the volume groups discovered are non-concurrent
#       for the specified DB2 UDB instance. If there are concurrent
#       VGs, report a warning to the user indicating that use of
#       concurrent VGs may result in data loss.
#
# Arguments:
#       instance name - name of instance to validate the VGs are
#                       non-concurrent
#
# Returns:      0 for success (no concurrent VGs)
#               1 if there are concurrent VGs
#
function check_vgs_concurrent
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset instance=$1

    errmsg 22 $instance
    if [[ -z $instance ]]; then
        errmsg 200
        return 1
    fi

    typeset OTHERVGS=$(KLIB_DB2_disc_get_instance_value $instance OTHERVGS)
    typeset HOMEVG=$(KLIB_DB2_disc_get_instance_value $instance DB2HOME_VG)
    typeset VGS="$OTHERVGS $HOMEVG"

    conc_vgs=
    for vg in $VGS; do
        if [[ -n ${AIX_DISKS["CONCURRENT_$vg"]} ]]; then
            value=${AIX_DISKS[CONCURRENT_$vg]}
            conc_vgs="$vg ($value) $conc_vgs"
        fi
    done

    if [[ -n $conc_vgs ]]; then
        errmsg 200
        abort 23 $instance !conc_vgs
    fi

    errmsg 100
    return 0
}
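# Illustrative only: AIX_DISKS is an associative array (apparently populated
# by the discovery/definition files sourced in Main) keyed by
# "CONCURRENT_<vgname>" for concurrent VGs, so a hypothetical VG "datavg"
# could be probed as:
#
#     [[ -n ${AIX_DISKS[CONCURRENT_datavg]} ]] && echo "datavg is concurrent"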
#----------------------------------------------------------------------------
#
# Name:         check_inactive_instance
#
# Description:  Checks for signs of an inactive DB2 instance.
#               If the DB2 DB discovery information is blank for all DB2
#               databases discovered, then report a warning indicating the
#               user may need to add any additional volume groups beyond the
#               instance home volume group.
#
# Arguments:    instance - instance name
#
# Returns:      1 - the instance is inactive
#               0 - the instance was active
#
function check_inactive_instance
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset instance="$1"
    [[ -z $instance ]] && return 1

    typeset databases=$(KLIB_DB2_disc_get_instance_value $instance DATABASES)
    [[ -z $databases ]] && return 1

    typeset -i nonempty_vals=0
    for db in $databases; do
        for val in $DB2_DB_VARIABLES; do
            token="DB_CFG_${db}_${val}"
            dbval=$(KLIB_DB2_disc_get_instance_value $instance $token)
            [[ -n $dbval ]] && (( nonempty_vals += 1 ))
        done
    done

    # There are no DB2 databases online, thus the instance must be offline;
    # report this as a warning.
    if (( $nonempty_vals == 0 )); then
        typeset vg=$(KLIB_DB2_disc_get_instance_value $instance DB2HOME_VG)
        errmsg 26 $instance $vg
    fi
}
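# Illustrative only: the warning path (errmsg 26) fires when every discovered
# DB_CFG_* value for the instance's databases is empty, e.g.
#
#     check_inactive_instance db2inst1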
#----------------------------------------------------------------------------
#
# Name:         instance_validation
#
# Description:
#       This function validates the various components of DB2
#       to ensure the DB2 instance meets the following criteria:
#
#       * The DB2 instance home directory must reside on a filesystem
#         that is on shared storage (shared VG)
#
#       * The DB2 instance type must be UDB (non-partitioned), and
#         not DPF
#
#       * The database selected must exist in the selected DB2 instance
#
#       * If the database variable USEREXIT=ON and LOGRETAIN=RECOVERY,
#         then one of the DBM_ENV_ variables ARCHIVE_PATH, RETRIEVE_PATH
#         or AUDIT_ERROR_PATH must be set in the db2profile or userprofile
#         files. All of this information is contained within the
#         DB2 discovery file for each instance discovered
#
#       * Shared volume groups must be accessible on all nodes
#         where a particular instance might reside in the cluster.
#         This requires all hdisks for the instance VG(s) to exist
#         on all participating nodes.
#
function instance_validation
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset instance="$1"
    typeset database_to_monitor="$2"
    typeset -n nodes=$3

    # What type of DB2 instance is this? If it's DPF (partitioned), then abort
    errmsg 6 $instance
    typeset dbtype=$(KLIB_DB2_disc_get_instance_value $instance "INSTANCE_TYPE")
    [[ "$dbtype" == "DPF" ]] && {
        errmsg 200
        abort 5 $instance
    }
    errmsg 100

    # Check to see if the DB2 instance is active.
    # If the DB2 instance is inactive then we've potentially missed
    # some of the shared volume groups (tablespaces, logs, etc) that
    # the DB2 instance relies on. Report a warning indicating that additional
    # volume groups may need to be added by hand, if the instance relies on
    # more VGs than just the instance home VG.
    #
    check_inactive_instance $instance

    errmsg 15 " $instance"
    check_userexit $instance

    #
    # Check to ensure all of the volume groups marked as the home VG
    # have the same set of pvids on one or more cluster nodes the user
    # specified. The pvids do need to be accessible, but the VG is not
    # required to be imported.
    #
    # Using the VG / pvid info, generate a list of nodes that
    # the instance could be imported on
    #
    check_shared_vgs $instance nodes

    #
    # Check to make sure volume groups are unique amongst this DB2 instance,
    # meaning there are no other discovered DB2 instances with one or more
    # of the same volume groups.
    #
    check_vgs_unique $instance nodes

    #
    # Check to ensure the user has a DB2 UDB instance with only non-concurrent
    # volume groups defined.
    #
    check_vgs_concurrent $instance

    #
    # Check to ensure the database selected exists in the selected DB2 instance
    #
    errmsg 17 "$instance" "$database_to_monitor"
    DATABASES=$(KLIB_DB2_disc_get_instance_value $instance DATABASES)
    for dbname in $DATABASES; do
        found=0
        for db_mon in $database_to_monitor; do
            if [[ "$dbname" == "$db_mon" ]]; then
                found=1
                break
            fi
        done

        # If the database was not found, report an error
        if (( found == 0 )); then
            errmsg 200
            abort 18 $db_mon $instance
        fi
    done
    errmsg 100

    return 0
}

#----------------------------------------------------------------------------
#
# Name:         importConfigFromFile
#
# Description:
#       This function will read the supplied config file and create HACMP
#       resources to configure the DB2 database for HA.
#
# Arguments:
#       N/A
#
# Returns:
#       0 - on success
#       1 - on failure
#
importConfigFromFile()
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    configure=0

    #
    # Find out whether the end user is interested in configuring
    # the database for the content engine
    #
    [[ "$COMPONENT" == "Content_Engine_db2_database" ]] &&
        configure_db2_content_engine=$(clsaxmlutil -s -x $CONFIG_FILE \
            -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT -a configure)

    #
    # Find out whether the end user is interested in configuring
    # the database for the process engine
    #
    [[ "$COMPONENT" == "Process_Engine_db2_database" ]] &&
        configure_db2_process_engine=$(clsaxmlutil -s -x $CONFIG_FILE \
            -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT -a configure)

    [[ -n $configure_db2_process_engine && "$configure_db2_process_engine" == "yes" ]] && {
        COMPONENT_ID="FILENET_DB_PROCESS_ENGINE"
        configure=1
    }

    [[ -n $configure_db2_content_engine && "$configure_db2_content_engine" == "yes" ]] && {
        COMPONENT_ID="FILENET_DB_CONTENT_ENGINE"
        configure=1
    }

    (( $configure == 0 )) && {
        return 1
    }

    dse_dir=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT | grep DSE_INSTALL_DIR)
    (( $? != 0 )) && {
        abort 28
    }
    DSE_INSTALL_DIR=$(echo $dse_dir | cut -d"=" -f2)
    if [[ -z $DSE_INSTALL_DIR ]]; then
        errmsg 29
    else
        export DSE_INSTALL_DIR
    fi

    primary_node=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT | grep PrimaryNode)
    (( $? != 0 )) && {
        abort 28
    }
    primary_node=$(echo $primary_node | cut -d"=" -f2)
    KLIB_HACMP_is_known_node $primary_node
    (( $? != 0 )) && {
        abort 30 $primary_node
    }
    PRIMARY_NODE=$primary_node

    takeover_nodes=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT | grep TakeoverNodes)
    (( $? != 0 )) && {
        abort 28
    }
    takeover_nodes=$(echo $takeover_nodes | cut -d"=" -f2)
    for tnode in $takeover_nodes
    do
        KLIB_HACMP_is_known_node $tnode
        (( $? != 0 )) && {
            abort 31 $takeover_nodes
        }
    done
    TAKEOVER_NODES=$takeover_nodes

    inst=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT | grep INSTANCE_NAME)
    (( $? != 0 )) && {
        abort 28
    }
    inst=$(echo $inst | cut -d "=" -f2)
    INSTANCE=$inst
    APPLICATION_NAME="${INSTANCE}_APP"

    db=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT | grep DATABASE_NAME)
    (( $? != 0 )) && {
        abort 28
    }
    db=$(echo $db | cut -d"=" -f2)
    DATABASE_NAME=$db

    addrs=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT | grep IPAddress_or_name)
    (( $? != 0 )) && {
        abort 28
    }
    addrs=$(echo $addrs | cut -d"=" -f2)
    SERVICE_LABEL=$addrs

    netmask=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA -t $COMPONENT | grep Prefix_or_Netmask)
    (( $? != 0 )) && {
        abort 28
    }
    NETMASK_PREFIX_LEN=$(echo $netmask | cut -d"=" -f2)

    FLAGS=-S

    /usr/es/sbin/cluster/sa/db2/sbin/cl_db2smadd -A -n $PRIMARY_NODE > /dev/null 2>&1
    return $?
}
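# The clsaxmlutil calls above each grep one "name=value" line out of the
# tool's output, then split the value off with cut. Illustrative pattern
# only, with a hypothetical tag SOME_TAG:
#
#     val=$(LC_ALL=C clsaxmlutil -s -x $CONFIG_FILE -m $DB2_MANUAL_CONFIG_SCHEMA \
#           -t $COMPONENT | grep SOME_TAG)
#     val=$(echo $val | cut -d"=" -f2)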
#----------------------------------------------------------------------------
# Main:
#----------------------------------------------------------------------------

# Read in the message catalog entries
. /usr/es/sbin/cluster/sa/db2/cat/cl_db2preimport

# Read in the init functions, abort, require, errmsg, etc.
. /usr/es/lib/ksh93/common_functions.ksh93

# Read in the DB2 definitions
. /usr/es/sbin/cluster/sa/db2/etc/db2_definitions

umask -S u=rw,g=,o=

# Read in the DB2 Discovery Information
[[ -f /usr/es/sbin/cluster/sa/db2/etc/db2.disc ]] &&
    . /usr/es/sbin/cluster/sa/db2/etc/db2.disc

# By default don't perform takeover discovery, meaning don't discover
# the instances accessible on the takeover node. This is useful for adding
# a single instance when the takeover node was just added to the cluster
# configuration.
#
typeset DB2_MANUAL_CONFIG_SCHEMA="/usr/es/sbin/cluster/sa/filenet/config/cl_filenet_manual_config.xsd"

while getopts C:A:TMFSo:n:i:d:l:P:f:c: option
do
    case $option in
        M)
            # Delete the pre-existing configuration (modify mode)
            FLAGS="-M"
            MODIFY_MODE=true
            ;;
        A)
            APPLICATION_NAME=$OPTARG
            ;;
        T)
            # Perform discovery on the takeover node (for Add an instance)
            TAKEOVER_DISCOVERY=true
            ;;
        C)
            COMPONENT_ID=$OPTARG
            ;;
        o)
            PRIMARY_NODE=$OPTARG
            ;;
        n)
            TAKEOVER_NODES=$OPTARG
            TAKEOVER_NODES_TOKENIZED=$(echo $TAKEOVER_NODES | sed -e "s/ /\:/g")
            TAKEOVER_NODES=$(echo $TAKEOVER_NODES | sed -e "s/\:/ /g")
            ;;
        i)
            INSTANCE=$OPTARG
            ;;
        d)
            DATABASE_NAME=$OPTARG
            ;;
        l)
            SERVICE_LABEL=$OPTARG
            ;;
        F)
            # First resource group in mutual configuration
            FLAGS="$FLAGS -F"
            ;;
        S)
            # Second resource group in mutual configuration
            FLAGS="$FLAGS -S"
            ;;
        P)
            # Netmask (IPv4) / Prefix Length (IPv6)
            NETMASK_PREFIX_LEN=$OPTARG
            ;;
        f)
            # Manual Configuration Mode
            MANUAL_CONFIG=true
            CONFIG_FILE=$OPTARG
            ;;
        c)
            # Component in Manual configuration mode
            COMPONENT=$OPTARG
            ;;
    esac
done
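# Illustrative invocation (hypothetical values) as SMIT might construct it:
#
#     cl_filenet_db_ce_pe_preimport -i db2inst1 -d PEDB -l db2svc1 \
#         -o nodeA -n "nodeB" -C FILENET_DB_PROCESS_ENGINE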
#
# Before handling anything else, check if we have to configure from XML
#
if [[ $MANUAL_CONFIG == true ]]; then
    if [[ ! -f $CONFIG_FILE ]]; then
        dspmsg -s 51 cluster.cat 26 "Unable to read the configuration file. Please ensure the correct path"
        exit 1
    fi

    importConfigFromFile
    ret=$?
    if (( ret == 0 )) && [[ -f /usr/es/sbin/cluster/sa/db2/etc/db2.disc ]]
    then
        . /usr/es/sbin/cluster/sa/db2/etc/db2.disc
    else
        exit 1
    fi
fi

[[ -z $APPLICATION_NAME ]] && {
    echo "APPLICATION_NAME is empty"
    exit 1
}

#
# If we are adding, not during change or show config
#
[[ "$MODIFY_MODE" == "false" ]] && {

    #
    # Find out whether the database for the Content and Process engines
    # is already configured
    #
    RESOURCE_GROUP_CONFIGURED=""
    RESOURCE_GROUP_CONFIGURED=$(clodmget -q "name=FILENET_DB_PE_CE_RESOURCE_GROUP" -f value -d "=" HACMPsa_metadata)
    [[ -n $RESOURCE_GROUP_CONFIGURED ]] && {
        RESOURCE_GROUP_CONFIGURED=${RESOURCE_GROUP_CONFIGURED//\"/}
        APPLICATION=""
        APPLICATION=$(clodmget -q "name=FILENET_DB_PE_CE_RESOURCE_GROUP" -f application_id -d "=" HACMPsa_metadata)
        APPLICATION=${APPLICATION//\"/}
        KLIB_SA_logmsg ERROR 11 1 filenetsa.cat "The Database for Process and Content Engine has been \
already configured\n\
with Resource Group %1\$s under Application %2\$s" $RESOURCE_GROUP_CONFIGURED $APPLICATION
        exit 1
    }

    #
    # Find out whether the DB for the process engine is being configured
    # again even though it is already configured
    #
    [[ "$COMPONENT_ID" == "FILENET_DB_PROCESS_ENGINE" ]] && {
        RESOURCE_GROUP_CONFIGURED=""
        RESOURCE_GROUP_CONFIGURED=$(clodmget -q "name=FILENET_DB_PE_RESOURCE_GROUP" -f value -d "=" HACMPsa_metadata)
        [[ -n $RESOURCE_GROUP_CONFIGURED ]] && {
            RESOURCE_GROUP_CONFIGURED=${RESOURCE_GROUP_CONFIGURED//\"/}
            APPLICATION=""
            APPLICATION=$(clodmget -q "name=FILENET_DB_PE_RESOURCE_GROUP" -f application_id -d "=" HACMPsa_metadata)
            APPLICATION=${APPLICATION//\"/}
            KLIB_SA_logmsg ERROR 11 2 filenetsa.cat "The Database for %1\$s Engine has been \
already configured\n\
with Resource Group %2\$s under Application %3\$s" Process $RESOURCE_GROUP_CONFIGURED $APPLICATION
            exit 1
        }
    }

    #
    # Find out whether the DB for the content engine is being configured
    # again even though it is already configured
    #
    [[ "$COMPONENT_ID" == "FILENET_DB_CONTENT_ENGINE" ]] && {
        RESOURCE_GROUP_CONFIGURED=""
        RESOURCE_GROUP_CONFIGURED=$(clodmget -q "name=FILENET_DB_CE_RESOURCE_GROUP" -f value -d "=" HACMPsa_metadata)
        [[ -n $RESOURCE_GROUP_CONFIGURED ]] && {
            RESOURCE_GROUP_CONFIGURED=${RESOURCE_GROUP_CONFIGURED//\"/}
            APPLICATION=""
            APPLICATION=$(clodmget -q "name=FILENET_DB_CE_RESOURCE_GROUP" -f application_id -d "=" HACMPsa_metadata)
            APPLICATION=${APPLICATION//\"/}
            KLIB_SA_logmsg ERROR 11 2 filenetsa.cat "The Database for %1\$s Engine has been \
already configured\n\
with Resource Group %2\$s under Application %3\$s" Content $RESOURCE_GROUP_CONFIGURED $APPLICATION
            exit 1
        }
    }
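    # The clodmget pattern used throughout this block: query the
    # HACMPsa_metadata ODM class and strip the surrounding quotes from the
    # returned value. Illustrative only, with a hypothetical name:
    #
    #     val=$(clodmget -q "name=SOME_NAME" -f value -d "=" HACMPsa_metadata)
    #     val=${val//\"/}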
    #
    # Find out whether any input DB under the input DB2 instance
    # is already configured.
    #
    APPLICATION=""
    APPLICATION=$(clodmget -q "value=${INSTANCE}_ResourceGroup" -f application_id -d "=" HACMPsa_metadata)
    [[ -n $APPLICATION ]] && {
        APPLICATION=${APPLICATION//\"/}

        #
        # Find out the DBs already configured for monitoring
        # under the input DB2 instance
        #
        DBS_TO_MONITOR=""
        DBS_TO_MONITOR=$(clodmget -q "application_id=$APPLICATION and name=DATABASE_TO_MONITOR" \
            -f value -d "=" HACMPsa_metadata)
        DBS_TO_MONITOR=${DBS_TO_MONITOR//\"/}

        #
        # Find out whether any input DB is already configured
        #
        for DB in $DATABASE_NAME
        do
            KLIB_UTIL_LIST_is_in_list DBS_TO_MONITOR $DB
            (( $? == 0 )) && {
                KLIB_SA_logmsg ERROR 9999 9999 filenetsa.cat "The Database %1\$s under \
input DB2 instance %2\$s is already configured in Resource Group %3\$s\n\
under application %4\$s" $DB $INSTANCE ${INSTANCE}_ResourceGroup $APPLICATION
                exit 1
            }
        done
    }
}

PARTICIPATING_NODES="$PRIMARY_NODE $TAKEOVER_NODES"

# Alert the user we're performing pre-verification of the DB2 instance
errmsg 13 $INSTANCE !PARTICIPATING_NODES

# Is the service IP label resolvable on the local node?
check_service_ip $SERVICE_LABEL || abort 4 $SERVICE_LABEL

# Perform discovery on the takeover nodes (-T flag)
$TAKEOVER_DISCOVERY && {
    echo
    # Disable summary reporting, but enable verbose logging
    /usr/es/sbin/cluster/sa/db2/sbin/cl_db2smadd -v \
        -N -n $TAKEOVER_NODES_TOKENIZED || {
        rc=$?
        echo $rc
        exit $rc
    }

    # Re-read the DB2 discovery file after performing discovery
    # on the takeover nodes
    [[ -f /usr/es/sbin/cluster/sa/db2/etc/db2.disc ]] &&
        . /usr/es/sbin/cluster/sa/db2/etc/db2.disc
}

# Perform network discovery after adding the takeover
# nodes; determine which is the best network to use

# Is the service IP label already defined to HACMP?
NETWORK=$(KLIB_HACMP_get_interface_network $SERVICE_LABEL)

# If this service IP label isn't already defined, find a suitable network
# to create the service IP label on, then pass this network to
# cl_db2import which will create the service IP
if [[ -z $NETWORK ]]; then
    # Find a usable network for this service IP label; this
    # is the same method used by the two-node configuration assistant
    NETWORK=$(KLIB_HACMP_get_net_with_most_interfaces $SERVICE_LABEL)
    [[ -z $NETWORK || $? != 0 ]] && abort 1 "$SERVICE_LABEL"
else
    # Make sure the service IP label is not an IPAT service
    nettype=$(KLIB_HACMP_get_network_type $NETWORK)
    if [[ "$nettype" != "alias" ]]; then
        abort 11 "$SERVICE_LABEL" "$nettype"
    fi
fi

# Alert the user to the network being used.
errmsg 2 "$NETWORK" "$SERVICE_LABEL"

# Perform instance validation using the user selected nodes
instance_validation "$INSTANCE" "$DATABASE_NAME" PARTICIPATING_NODES

errmsg 14

/usr/es/sbin/cluster/sa/filenet/sbin/cl_filenet_db_ce_pe_import -a -v -i $INSTANCE \
    -A "$APPLICATION_NAME" \
    -d "$DATABASE_NAME" \
    -w $NETWORK \
    -l $SERVICE_LABEL \
    -o $PRIMARY_NODE \
    -n "$TAKEOVER_NODES_TOKENIZED" \
    -C "$COMPONENT_ID" \
    $FLAGS
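# The script's exit status is that of the last command above, the
# cl_filenet_db_ce_pe_import call.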