#!/bin/ksh93
# ALTRAN_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2020,2021.  All rights reserved.
#
# ALTRAN_PROLOG_END_TAG
#
# @(#) 7d4c34b 43haes/usr/sbin/cluster/sa/sap/sbin/cl_modifysapinstance.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM
##
## NAME:
##      cl_modifysapinstance
##
## PURPOSE:
##      Modifies the existing HACMP configuration for different SAP instance
##      types, such as Application Server instances, SCS instances, ERS
##      instances, the SAP database, and the SAP global filesystem.
##
## ARGUMENTS:
##
##      Modifying the SAP Global File System
##      :== -t GFS
##          -p      Primary node
##          -T      Takeover node(s)
##          -I      Service IP label(s)
##          -V      Shared volume group(s)
##          -E      Export directories
##          -M      Mount directories
##
##      Modifying an Application Server instance
##      :== -t AS
##          -s      SAP System ID
##          -i      SAP instance name
##          -a      Application ID
##          -p      Primary node
##          -T      Takeover node(s)
##          -I      Service IP label(s)
##          -n      Network for the service IP
##          -V      Shared volume group(s)
##          -r      Database resource group
##
##      Modifying an SCS instance
##      :== -t SCS
##          -s      SAP System ID
##          -i      SAP instance name
##          -a      Application ID
##          -p      Primary node
##          -T      Takeover node(s)
##          -I      Service IP label(s)
##          -n      Network for the service IP
##          -V      Shared volume group(s)
##          -r      Database resource group
##
##      Modifying an ERS instance
##      :== -t ERS
##          -s      SAP System ID
##          -i      SAP instance name
##          -a      Application ID
##          -P      Participating nodes
##          -I      Service IP label(s)
##          -n      Network for the service IP
##          -V      Shared volume group(s)
##
## RETURNS
##      0 on success
##      1 on failure
##

. /usr/es/lib/ksh93/func_include

version='1.2.1.10 $Source: 61haes_r711 43haes/usr/sbin/cluster/sa/sap/sbin/cl_modifysapinstance.sh 1$'

#----------------------------------------------------------------------------
# Global Definitions
#----------------------------------------------------------------------------
. /usr/es/sbin/cluster/sa/sap/etc/SAPGlobals
. /usr/es/sbin/cluster/sa/sap/sbin/SAPUtilities

PATH=$PATH:/usr/es/sbin/cluster/sa/sbin
PATH=$PATH:/usr/es/sbin/cluster/utilities

typeset PROGRAM=${0##*/}
typeset PRINODE TAKENODE        # primary and takeover nodes
typeset VOLUME_GROUP            # shared volume groups
typeset VIRTUAL_IPS             # service IP label
typeset APPLICATION_ID          # Application ID for App Discovery
typeset SMARTASSIST_ID="SAPNW"
typeset COMPONENT_ID=
typeset INSTANCE
typeset MOUNT_DIR
typeset EXPORT_DIR
typeset SAPSID
typeset INSTANCE_NO             # SAP Instance number
typeset SERVICE_NETWORK
typeset DBRG
typeset PARTICIPATING_NODES
typeset SAPADMNUSR
typeset -i ers_invoke=0
typeset -i scs_invoke=0

#
# : Log the failure status - it might be interesting
#
typeset clutils_log_dir=$(clodmget -q "name = clutils.log" -f value -n HACMPlogs)
clutils_log_dir=${clutils_log_dir:-/var/hacmp/log}
typeset clutils_log=${clutils_log_dir}/clutils.log

#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Function:
#       ODMDeleteandAdd
#
# Purpose:
#       To delete and add the ODM entries in HACMPsa_metadata
#
# Arguments:
#       APPLICATION_ID VOLUME_GROUP VIRTUAL_IPS SERVICE_NETWORK
#
# Returns:
#       0 For success
#       1 For failure
#----------------------------------------------------------------------------
function ODMDeleteandAdd
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset appid=$1
    shift
    typeset values

    # Each remaining argument is the name of a global variable; the stored
    # HACMPsa_metadata entries for that name are removed and re-added with
    # the variable's current value(s).
    for name in $@ ; do
        odmdelete -o HACMPsa_metadata -q "application_id=$appid and name=$name" 2> /dev/null
        eval "values=\${$1}"
        shift
        for i in $(echo $values) ; do
            print "HACMPsa_metadata:
                application_id = $appid
                name = $name
                value = $i
                reserved = 0" | odmadd 2> /dev/null
        done
    done

    return 0
}
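#
# Example (illustrative): later in this script the stored metadata for an
# instance is refreshed by passing the names of the global variables whose
# current values should replace the existing entries, e.g.:
#
#   ODMDeleteandAdd "$APPLICATION_ID" "VIRTUAL_IPS" "SERVICE_NETWORK"
#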
#----------------------------------------------------------------------------
# Function:
#       addServiceIPLabeltoNetwork
#
# Purpose:
#       Adds a service IP label to a specific network of the HACMP
#       configuration if it doesn't exist.
#
# Arguments:
#       service_ip and network
#
# Returns:
#       0 on success
#       1 on failure
#----------------------------------------------------------------------------
function addServiceIPLabeltoNetwork {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    typeset result
    typeset service_ip=$1
    typeset network=$2

    #
    # Can the service IP label be resolved?
    #
    /usr/bin/host $service_ip >/dev/null 2>&1
    (( $? != 0 )) && {
        # The service IP label cannot be resolved
        #user_msg 30 14 $service_ip
        KLIB_SAP_SA_logmsg ERROR 30 14 sapsa.cat "14 ERROR: The Service IP label: %s is not resolvable on the \
local system. Please check to ensure the IP label is resolvable via either \
DNS, or /etc/hosts.\n" $service_ip
        exit 1
    }

    result=$(clmgr query interface | grep $service_ip)
    [[ -n $result ]] && clmgr -f delete interface $service_ip

    KLIB_SAP_SA_logmsg INFO 40 2 sapsa.cat "\tCreating service IP label: %s\n" $service_ip

    if [[ -n $PRINODE || -n $TAKENODE ]]
    then
        typeset nodes="$PRINODE $TAKENODE"
        [[ -z $network ]] && {
            network=$(getServiceNetwork nodes)
        }
    fi

    if [[ -n "${PARTICIPATING_NODES[@]}" ]]
    then
        typeset nodes="${PARTICIPATING_NODES[@]}"
        [[ -z $network ]] && {
            network=$(getServiceNetwork nodes)
        }
    fi

    clmgr add service_ip $service_ip \
        NETWORK=$network
    (( $? != 0 )) && {
        KLIB_SAP_SA_logmsg ERROR 30 16 sapsa.cat "Unexpected error encountered while attempting to create PowerHA SystemMirror service IP label: %s\n" $service_ip
        InternalErrorAbort
    }

    return 0
}
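#
# Example (illustrative; the label and network names are hypothetical): the
# function is called with a resolvable service IP label and, optionally, the
# cluster network it should be added to, as in
#
#   addServiceIPLabeltoNetwork sapscsvip net_ether_01
#
# which mirrors the calls made below, "addServiceIPLabeltoNetwork $ip $SERVICE_NETWORK".
#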
#----------------------------------------------------------------------------
# Function:
#       updateInstanceAttributes
#
# Purpose:
#       Updates the resource group of an SAP instance in place: the database
#       resource group START_AFTER dependency, volume groups, service IP
#       labels, service network, and node list are brought in line with the
#       values supplied on the command line, and the corresponding
#       HACMPsa_metadata entries are refreshed.
#
# Arguments:
#       None used; the function operates on the global variables set by the
#       command line parsing.
#
# Returns:
#       The exit status of the last modification command.
#----------------------------------------------------------------------------
function updateInstanceAttributes {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    typeset appid=$APPLICATION_ID
    typeset -A servicelabel=$VIRTUAL_IPS
    typeset odmvg=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "application_id=$appid and name=VOLUME_GROUP" -f value HACMPsa_metadata)
    typeset odmvip=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "application_id=$appid and name=VIRTUAL_IPS" -f value HACMPsa_metadata)
    typeset rgname=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "application_id=$appid and name=RESOURCE_GROUP" -f value HACMPsa_metadata)
    typeset odmnw=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "application_id=$appid and name=SERVICE_NETWORK" -f value HACMPsa_metadata)
    typeset odmdbrg=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "application_id=$appid and name=DBRG" -f value HACMPsa_metadata)
    typeset dependentrg=$(echo $odmdbrg"+"$rgname )

    # Collapse any multi-line clodmget output into space-separated values
    odmvg=$(echo $odmvg)
    odmvip=$(echo $odmvip)

    if [[ -z $odmdbrg ]]; then
        odmdbrg="NONE"
    fi

    if [[ $odmdbrg != $DBRG ]]; then
        if [[ "$odmdbrg" == "NONE" && "$DBRG" != "NONE" ]]; then
            # No dependency exists yet, so add a START_AFTER dependency with DBRG
            /usr/es/sbin/cluster/utilities/clrgdependency -t'START_AFTER' -a -c$rgname -p $DBRG >> /dev/null
        elif [[ "$odmdbrg" != "NONE" && "$DBRG" == "NONE" ]]; then
            # For SAP smart assist, we only add a START_AFTER dependency.
            # As the provided DBRG is empty, just delete the dependency if one exists.
            /usr/es/sbin/cluster/utilities/clrgdependency -t'START_AFTER' -sl | grep -w $rgname | read source target
            if [[ -n $source && -n $target ]]
            then
                /usr/es/sbin/cluster/utilities/clrgdependency -t'START_AFTER' -d -c$source -p$target
            fi
        elif [[ $DBRG != "NONE" ]]; then
            # For SAP smart assist, we only add a START_AFTER dependency.
            # Remove the existing dependency, if any, and add the new dependency.
            /usr/es/sbin/cluster/utilities/clrgdependency -t'START_AFTER' -sl | grep -w $rgname | read source target
            if [[ -n $source && -n $target ]]
            then
                /usr/es/sbin/cluster/utilities/clrgdependency -t'START_AFTER' -d -c$source -p$target
            fi
            /usr/es/sbin/cluster/utilities/clrgdependency -t'START_AFTER' -a -c$rgname -p $DBRG >> /dev/null
        fi
        # Update HACMPsa_metadata
        ODMDeleteandAdd "$APPLICATION_ID" "DBRG"
    fi

    if [[ -n $VOLUME_GROUP && "$odmvg" != "$VOLUME_GROUP" ]] ; then
        if [[ $VOLUME_GROUP == "LOCAL" ]]; then
            odmdelete -o HACMPresource -q "group=$rgname and name=VOLUME_GROUP"
        else
            /usr/es/sbin/cluster/utilities/clmgr modify resource_group $rgname VOLUME_GROUP="$VOLUME_GROUP"
        fi
        # Update HACMPsa_metadata
        ODMDeleteandAdd "$APPLICATION_ID" "VOLUME_GROUP"
    fi

    if [[ "$odmvip" != "$VIRTUAL_IPS" || "$odmnw" != "$SERVICE_NETWORK" ]]; then
        if [[ $odmnw != "LOCAL" && $SERVICE_NETWORK == "LOCAL" ]]; then
            /usr/es/sbin/cluster/utilities/clmgr modify resource_group $rgname SERVICE_LABEL=""
        elif [[ $SERVICE_NETWORK != "LOCAL" ]]; then
            typeset local_node_name=$(/usr/es/sbin/cluster/utilities/get_local_nodename 2>/dev/null)
            for ip in ${servicelabel[@]} ; do
                if (( $ers_invoke == 0 ));then
                    [[ "$local_node_name" == "$PRINODE" ]] && {
                        /usr/es/sbin/cluster/sa/sap/sbin/cl_checkIsAliasAddr $ip
                    } || {
                        cl_rsh $PRINODE "/usr/es/sbin/cluster/sa/sap/sbin/cl_checkIsAliasAddr $ip"
                    }
                    (( $? != 0 )) && {
                        KLIB_SAP_SA_logmsg ERROR 30 27 sapsa.cat "SAP Instance name: %s is configured to run on node %s with an IP %s which is found to be a non-aliased address.\n" $INSTANCE \
                            $PRINODE $VIRTUAL_IPS
                        InternalErrorAbort
                    }
                    addServiceIPLabeltoNetwork $ip $SERVICE_NETWORK
                else
                    [[ "$local_node_name" == "${participating_nodes[0]}" ]] && {
                        /usr/es/sbin/cluster/sa/sap/sbin/cl_checkIsAliasAddr $ip
                    } || {
                        cl_rsh ${participating_nodes[0]} "/usr/es/sbin/cluster/sa/sap/sbin/cl_checkIsAliasAddr $ip"
                    }
                    (( $? != 0 )) && {
                        KLIB_SAP_SA_logmsg ERROR 30 27 sapsa.cat "SAP Instance name: %s is configured to run on node %s with an IP %s which is found to be a non-aliased address.\n" $INSTANCE \
                            ${participating_nodes[0]} $VIRTUAL_IPS
                        InternalErrorAbort
                    }
                    addServiceIPLabeltoNetwork $ip $SERVICE_NETWORK
                fi
            done

            #### When the SCS service IP label changes, the ERS resource group's MISC_DATA must be updated as well ####
            if (( $scs_invoke == 1 ));then
                count=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "application_id=$appid and name=INSTANCE_NUMBERS" -f value HACMPsa_metadata | wc -l )
                ers_instance_name=$(/usr/es/sbin/cluster/utilities/clmgr -a MISC_DATA query RG $rgname | awk -F "=" '{print $2}'| sort -u | sed s/\"//g | awk -F "_" '{print $1}')
                ers_app_id=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "name=INSTANCE_NAMES and value=$ers_instance_name" -f application_id HACMPsa_metadata)
                ers_rg_name=$(/usr/es/sbin/cluster/utilities/clodmget -n -q "name=RESOURCE_GROUP and application_id=$ers_app_id" -f value HACMPsa_metadata)
                if (( $count > 1 ));then
                    typeset -i c=0
                    for ip in $VIRTUAL_IPS
                    do
                        # Instance name owning this IP, parsed from the profile path in /usr/sap/sapservices
                        scs[$c]=$(grep -w "$ip" "/usr/sap/sapservices" | awk -F " " '{print $5}' | awk -F "/" '{print $7}' | awk -F "_" '{print $2}')
                        ips_add[$c]=$ip
                        (( c++ ))
                    done
                    new_scs_names=$(echo ${scs[0]}"_"${scs[1]})
                    if (( $c > 1 ));then
                        new_scs_ips=$(echo ${ips_add[0]}"_"${ips_add[1]})
                    else
                        new_scs_ips=$(echo ${ips_add[0]})
                    fi
                    new_misc_data=$(echo $new_scs_names","$new_scs_ips)
                else
                    odm_value=$(/usr/es/sbin/cluster/utilities/clmgr -a MISC_DATA query RG $ers_rg_name |awk -F "=" '{print $2}'| sort -u | sed s/\"//g )
                    odm_instance=$(echo $odm_value | awk -F "," '{print $1}')
                    # Keep the stored instance name and pair it with the new service IP label(s)
                    new_misc_data=$(echo $odm_instance","$VIRTUAL_IPS)
                fi
                /usr/es/sbin/cluster/utilities/clmgr modify resource_group $ers_rg_name MISC_DATA="$new_misc_data"
            fi

            /usr/es/sbin/cluster/utilities/clmgr modify resource_group $rgname SERVICE_LABEL="$VIRTUAL_IPS"
        fi
        # Update HACMPsa_metadata
        ODMDeleteandAdd "$APPLICATION_ID" "VIRTUAL_IPS" "SERVICE_NETWORK"
    fi

    if (( $ers_invoke == 0 ));then
        /usr/es/sbin/cluster/utilities/clmgr modify resource_group $rgname NODES="$PRINODE $TAKENODE"
    else
        /usr/es/sbin/cluster/utilities/clmgr modify resource_group $rgname NODES="${participating_nodes[*]}"
    fi
}
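#
# Illustrative note (hypothetical instance and IP label names): the MISC_DATA
# value assembled above has the form
# "<instance names joined by '_'>,<service IP labels joined by '_'>", e.g.:
#
#   ASCS00_SCS01,ascsvip_scsvip
#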
#----------------------------------------------------------------------------
# Function:
#       modifySAPGFS
#
# Purpose:
#       To modify the HACMP resources and resource group for managing the SAP
#       global filesystems. First, it removes all the resources associated
#       earlier and then re-creates them with the revised values.
#
# Arguments:
#       None.
#
# Returns:
#       0 For success
#       1 For failure
#----------------------------------------------------------------------------
function modifySAPGFS {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset REPLICATED_ENABLED_SAVG="false"
    typeset VARYON_WITH_MISSING_UPDATES=""
    typeset FORCED_VARYON=""
    typeset DATA_DIVERGENCE_RECOVERY=""
    typeset stype=""
    typeset site_policy=""
    typeset RESOURCE_GROUP=$(getMetadata $APPLICATION_ID RESOURCE_GROUP)

    if [[ -n $RESOURCE_GROUP ]] && [[ -n $(clodmget -q "application_id=$RESOURCE_GROUP and name=JSON_FILENAME" -n -f value HACMPsa_metadata) ]]
    then
        REPLICATED_ENABLED_SAVG="true"

        # Take a backup of the current GLVM related configuration values, which will be lost with the Smart Assist modify
        VARYON_WITH_MISSING_UPDATES=$(clodmget -q "group=$RESOURCE_GROUP and name=VARYON_WITH_MISSING_UPDATES" -n -f value HACMPresource)
        FORCED_VARYON=$(clodmget -q "group=$RESOURCE_GROUP and name=FORCED_VARYON" -n -f value HACMPresource)
        DATA_DIVERGENCE_RECOVERY=$(clodmget -q "group=$RESOURCE_GROUP and name=DATA_DIVERGENCE_RECOVERY" -n -f value HACMPresource)
        stype=$(clodmget -q "group=$RESOURCE_GROUP" -n -f stype HACMPgroup)
        case "$stype" in
            PPS) site_policy=primary ;;
            OES) site_policy=either ;;
            OBS) site_policy=both ;;
            *)   site_policy=ignore ;;
        esac
    fi

    sapsystem=$(getMetadata $APPLICATION_ID SAPSYSTEMNAME)

    user_msg 40 3
    removeHAComponents $APPLICATION_ID || return $?

    SAPSYSTEMNAME=$sapsystem /usr/es/sbin/cluster/sa/sap/sbin/cl_addsapinstance \
        -t 'GFS' \
        -p"$PRINODE" \
        -T"$TAKENODE" \
        -I"$VIRTUAL_IPS" \
        -V"$VOLUME_GROUP" \
        -E"$EXPORT_DIR" \
        -M"$MOUNT_DIR" \
        -s"$SAPSID"
    rc=$?

    if [[ "$REPLICATED_ENABLED_SAVG" == "true" ]] && (( $rc == 0 )); then
        # Restore the GLVM configuration with the backed up values
        clmgr modify rg $RESOURCE_GROUP VARYON_WITH_MISSING_UPDATES=$VARYON_WITH_MISSING_UPDATES FORCED_VARYON=$FORCED_VARYON DATA_DIVERGENCE_RECOVERY=$DATA_DIVERGENCE_RECOVERY SITE_POLICY=$site_policy
        rc=$?
        if (( $rc == 0 )); then
            # Reconfigure the appserv and appmon with the GLVM wizard.
            # The hidden option -S stops the automatic cluster sync; the user will do the sync.
            /usr/es/sbin/cluster/glvm/utils/cl_glvm_configuration -S -c "$RESOURCE_GROUP"
            if (( $? != 0 )); then
                # Let the user run this command
                print "$(date) $PROGRAM[$LINENO]: WARNING: Failed to restore GLVM replicated resource for Smart Assist Resource Group $RESOURCE_GROUP.\n\
Run the following command manually to make sure GLVM statistics are collected and shown in the PowerHA GUI.\n\
/usr/es/sbin/cluster/glvm/utils/cl_glvm_configuration -c $RESOURCE_GROUP\n" >> $clutils_log
                user_msg 30 50 "$RESOURCE_GROUP"
                exit 0
            fi
        fi
    fi

    return $rc
}

#----------------------------------------------------------------------------
# Function:
#       modifySAPSCSInstance
#
# Purpose:
#       To modify the HACMP resources and resource group for managing SAP SCS
#       instance(s). The attributes of the existing resources are updated in
#       place with the revised values (see updateInstanceAttributes).
#
# Arguments:
#       None.
#
# Returns:
#       0 For success
#       1 For failure
#----------------------------------------------------------------------------
function modifySAPSCSInstance {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    ers_invoke=0
    scs_invoke=1
    updateInstanceAttributes "$APPLICATION_ID" "$PRINODE" "$TAKENODE" "$VIRTUAL_IPS" "$SERVICE_NETWORK" "$VOLUME_GROUP" "$DBRG"
}

#----------------------------------------------------------------------------
# Function:
#       modifySAPERSInstance
#
# Purpose:
#       To modify the HACMP resources and resource group for managing SAP ERS
#       instance(s).
#       The attributes of the existing resources are updated in place with the
#       revised values (see updateInstanceAttributes).
#
# Arguments:
#       None.
#
# Returns:
#       0 For success
#       1 For failure
#----------------------------------------------------------------------------
function modifySAPERSInstance {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    ers_invoke=1
    updateInstanceAttributes "$APPLICATION_ID" "$PARTICIPATING_NODES" "$VIRTUAL_IPS" "$SERVICE_NETWORK" "$VOLUME_GROUP" "$DBRG"
}

#----------------------------------------------------------------------------
# Function:
#       modifySAPASInstance
#
# Purpose:
#       To modify the HACMP resources and resource group for managing an SAP
#       AS instance. The attributes of the existing resources are updated in
#       place with the revised values (see updateInstanceAttributes).
#
# Arguments:
#       None.
#
# Returns:
#       0 For success
#       1 For failure
#----------------------------------------------------------------------------
function modifySAPASInstance {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    ers_invoke=0
    updateInstanceAttributes "$APPLICATION_ID" "$PRINODE" "$TAKENODE" "$VIRTUAL_IPS" "$SERVICE_NETWORK" "$VOLUME_GROUP" "$DBRG"
}

#-----------------------------------------------------------------------------
# Function:
#       removeHAComponents
#
# Purpose:
#       Remove the application monitors, application servers, and resource groups
#       from the HACMP configuration. In addition, the HACMP metadata stored for
#       this instance is removed from the cluster configuration.
#
# Arguments:
#       (1) application name
#
# Returns:
#       0 on success
#       1 on failure
#----------------------------------------------------------------------------
function removeHAComponents {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    typeset -L32 appid=$1
    typeset applications=$(getResource $appid APPLICATIONS)

    (( ${#applications[*]} > 0 )) && {
        for app in ${applications[*]}; do
            MONITORS=$(clvt query application $app | \
                grep ASSOCIATEDMONITORS | \
                awk -F= '{ print $2 }' | sed -e "s/\"//g")
            for monitor in $MONITORS; do
                # Skip the "clam_nfsv4" monitor
                if [[ $monitor == "clam_nfsv4" ]]
                then
                    continue
                fi
                clvt delete application_monitor $monitor
                (( $? != 0 )) && {
                    user_msg 30 18 $monitor
                    InternalErrorAbort
                }
            done

            # Skip the "clas_nfsv4" application
            if [[ $app == "clas_nfsv4" ]]
            then
                continue
            fi
            clvt delete application $app > /dev/null 2>&1
            (( $? != 0 )) && {
                user_msg 30 47 $app
                InternalErrorAbort
            }
        done
    }

    for rg in $(getResourceGroups $appid); do
        # Check if any parent/child relationship exists
        clrgdependency -t PARENT_CHILD -sl | grep -w $rg | read parent child
        if [[ -n "$parent" && -n "$child" ]]
        then
            /usr/es/sbin/cluster/utilities/clrgdependency \
                -t'PARENT_CHILD' -d \
                -p"$parent" \
                -c"$child"
        fi
        clvt delete resource_group $rg >/dev/null 2>&1
        (( $? != 0 )) && {
            user_msg 30 20 $rg
            InternalErrorAbort
        }
    done

    # Remove the application (SA) references for this instance
    clrmsaapp -a $appid
    (( $? != 0 )) && {
        user_msg 30 21 $appid
        InternalErrorAbort
    }

    return 0
}

#----------------------------------------------------------------------------
# Function:
#       validateAppName
#
# Purpose:
#       Validate the application name and ensure it contains only
#       valid characters
#
# Arguments:
#       (1) - Application Name
#
# Returns:
#       0 on valid name
#       1 name is invalid
#----------------------------------------------------------------------------
function validateAppName {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    typeset appname=$1
    typeset -i invalid=0

    # Anything left after stripping [a-zA-Z0-9_] is an invalid character
    [[ -n ${appname//[a-zA-Z0-9_]/} ]] && {
        user_msg 30 10 $appname
        return 1
    }
    return 0
}

#----------------------------------------------------------------------------
# Function:
#       validateNodeLists
#
# Purpose:
#       Ensure the node lists contain nodes that are unique; the primary
#       and takeover nodes should not have duplicates between the two
#       lists.
#
# Arguments:
#       (1) by reference - primary node
#       (2) by reference - takeover node list
#
# Returns:
#       0 on success
#       1 on failure
#
function validateNodeLists {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    typeset -n primary=$1
    typeset -n takeover=$2
    typeset -A nodes

    nodes[$primary]=$primary
    for node in $takeover; do
        [[ -n ${nodes[$node]} ]] && {
            user_msg 30 9 $node
            return 1
        }
    done

    return 0
}

#----------------------------------------------------------------------------
# Function:
#       validateParticipatingNodes
#
# Purpose:
#       Ensure all the participating nodes are part of the cluster
#
# Arguments:
#       (1) by reference - Participating nodes
#
# Returns:
#       0 on success
#       1 on failure
#
function validateParticipatingNodes {
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x
    typeset -n nodelist=$1
    typeset cluster_nodelist=$(cllsnode -cS | cut -f1 -d":")

    for node in $nodelist
    do
        if ! print "$cluster_nodelist" | grep -w $node > /dev/null 2>&1
        then
            user_msg 30 48 $node
            return 1
        fi
    done
    return 0
}
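#
# Example (illustrative): validateAppName accepts only letters, digits, and
# underscores, so
#
#   validateAppName "SAP_TST_SCS"     # returns 0
#   validateAppName "SAP TST-SCS"     # returns 1 (space and '-' are rejected)
#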
#----------------------------------------------------------------------------
# Main
#----------------------------------------------------------------------------
ALLARGS=$*

while getopts t:s:i:a:p:T:I:V:E:M:P:n:r: option
do
    case $option in
      t)  # Type of instance
          TYPE=$OPTARG
          ;;
      s)  # SAPSYSTEMNAME
          SAPSID=$OPTARG
          ;;
      i)  # SAP Instance Name
          INSTANCE=$OPTARG
          # The instance number is the last two characters of the instance name
          if [[ -n $INSTANCE ]]; then
              INSTANCE_NO=${INSTANCE##${INSTANCE%%??}}
          fi
          ;;
      a)  # Application ID
          APPLICATION_ID=$OPTARG
          APPLICATION_ID=${APPLICATION_ID// /}      # Remove whitespace
          validateAppName $APPLICATION_ID
          (( $? != 0 )) && exit 1
          ;;
      p)  # Primary node
          PRINODE=$OPTARG
          ;;
      T)  # Takeover node(s); accumulate when -T is specified more than once
          if [[ -z $TAKENODE ]]
          then
              TAKENODE=$OPTARG
          else
              TAKENODE="$TAKENODE $OPTARG"
          fi
          ;;
      I)  # Service IP labels
          VIRTUAL_IPS=$OPTARG
          ;;
      V)  # Shared Volume Groups
          VOLUME_GROUP=$OPTARG
          ;;
      E)  # Export Directories (used with type=GFS)
          EXPORT_DIR=$OPTARG
          ;;
      M)  # Mount Directories (used with type=GFS)
          MOUNT_DIR=$OPTARG
          ;;
      P)  # Participating nodes (used with type=ERS)
          PARTICIPATING_NODES=$OPTARG
          ;;
      n)  # Network Interface for Service IP
          SERVICE_NETWORK=$OPTARG
          ;;
      r)  # DataBase RG
          DBRG=$OPTARG
          ;;
      *)  ;;
    esac
done

if [[ -z $DBRG ]]
then
    DBRG="NONE"
fi

SAPADMNUSR=$(echo $SAPSID | tr '[:upper:]' '[:lower:]')adm

# No type specified, abort immediately
[[ -z $TYPE ]] && exit 1

# Ensure the node lists for primary and takeover
# do not contain duplicates (a single node used more than once)
if [[ -n $PRINODE || -n $TAKENODE ]]
then
    validateNodeLists PRINODE TAKENODE
fi

if [[ -z $PARTICIPATING_NODES ]]
then
    PARTICIPATING_NODES="$PRINODE $TAKENODE"
fi

validateParticipatingNodes PARTICIPATING_NODES

# Remove duplicate node names from the participating nodes
PARTICIPATING_NODES=$(echo $PARTICIPATING_NODES | tr " " "\n" | sort | uniq)

if [[ -n $PARTICIPATING_NODES ]];then
    typeset -A participating_nodes
    typeset -i count=0
    for node in $PARTICIPATING_NODES
    do
        participating_nodes[$count]=$node
        ((count=$count+1))
    done

    # Validate the ERS nodes: the ERS participating nodes must be part of the
    # SCS node list, and the node order should not be the same.
    if [[ -n $INSTANCE_NO && $TYPE == ERS ]]
    then
        for node in $PARTICIPATING_NODES;do
            output=$(cl_rsh $node "/usr/bin/su - $SAPADMNUSR -c \"env LANG=C \
                sapcontrol -nr $INSTANCE_NO -function ParameterValue SCSID\"" 2>/dev/null \
                | egrep -v "OK|ParameterValue|[0-9] " )
            if [[ -z $output ]];then
                continue
            else
                if [[ ! $(echo $output | grep "NIECONN_REFUSED" ) ]];then
                    scs_nos=$(echo $output | sed 's/^[ \t]*//;s/[ \t]*$//')
                    break;
                fi
            fi
        done

        typeset SCS_NODES
        if [[ -n $scs_nos ]]
        then
            typeset sap_scs_appid
            sap_scs_appid=$(clodmget -n -q "name=INSTANCE_NUMBERS and value=\"${scs_nos}\"" -f application_id HACMPsa_metadata)
            [[ -z $sap_scs_appid ]] && {
                KLIB_SAP_SA_logmsg ERROR 30 28 sapsa.cat "Unable to find SAP SCS instance resources configured on the cluster.\n\
Please configure SAP SCS instances using Smart Assist for SAP.\n"
                InternalErrorAbort
            }
            SCS_NODES=$(getNodes $sap_scs_appid)
        else
            # We are here because we could not get the SCS nodes from the SAP binaries.
            # Try to fetch the SCS nodes from the ODM.
            SCSRG=$(clodmget -n -q "name=MISC_DATA" -f group,value HACMPresource | grep -w "$INSTANCE" | cut -d: -f1)
            SCS_NODES=$(clodmget -n -f nodes -q "group=$SCSRG" HACMPgroup)
            if [[ -z $SCS_NODES ]]
            then
                # We could not get the SCS node information from either the SAP binaries or the ODM; exit with an error
                KLIB_SAP_SA_logmsg ERROR 30 28 sapsa.cat "Unable to find SAP SCS instance resources configured on the cluster.\n\
Please configure SAP SCS instances using Smart Assist for SAP.\n"
                InternalErrorAbort
            fi
        fi

        typeset -i k=0
        typeset -A scs_nodes
        for node in $SCS_NODES
        do
            scs_nodes[$k]=$node
            ((k=$k+1))
        done

        # Check whether the participating nodes are SAP SCS ready:
        # (ERS instances must run only on nodes where SCS instances are configured to run.)
        typeset ers_scs_nodes
        ers_scs_nodes="$SCS_NODES"
        for node in $PARTICIPATING_NODES; do
            KLIB_UTIL_LIST_is_in_list ers_scs_nodes $node || {
                KLIB_SAP_SA_logmsg ERROR 30 29 sapsa.cat "SAP SCS instance(s) is/are not configured on %s node where %s has been selected to run.\n\
Participating node list must match the node list of SCS instances.\n" $node $INSTANCE
                InternalErrorAbort
            }
        done

        ## Look up the primary node of the SCS (central services) instance that
        ## corresponds to this ERS instance; if the ERS instance has the same
        ## primary node, swap the ERS primary node with its last participating node.
        if [[ ${participating_nodes[0]} == ${scs_nodes[0]} ]];then
            typeset temp
            temp=${participating_nodes[0]}
            typeset -i p
            p=$count-1
            participating_nodes[0]=${participating_nodes[$p]}
            participating_nodes[$p]=$temp
        fi
    fi
fi

# Dispatch to the modify function that matches the requested instance type
typeset -A modifyInstanceMethod
modifyInstanceMethod=( [GFS]="modifySAPGFS" [AS]="modifySAPASInstance" [SCS]="modifySAPSCSInstance" [ERS]="modifySAPERSInstance" )

if [[ -z ${modifyInstanceMethod[$TYPE]} ]]; then
    exit 1
fi

${modifyInstanceMethod[$TYPE]}
exit $?
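
# Example invocations (illustrative only; the SID, instance names, application
# IDs, node names, volume groups, networks, and IP labels below are hypothetical):
#
#   Modify an SCS instance:
#     cl_modifysapinstance -t SCS -s TST -i ASCS00 -a SAP_TST_SCS \
#         -p node1 -T node2 -I scsvip -n net_ether_01 -V scsvg -r NONE
#
#   Modify the SAP global filesystem resources:
#     cl_modifysapinstance -t GFS -s TST -p node1 -T node2 -I gfsvip \
#         -V sapvg -E "/export/sapmnt/TST" -M "/sapmnt/TST"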