#!/bin/ksh93
# ALTRAN_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# Copyright (C) Altran ACT S.A.S. 2017,2021. All rights reserved.
#
# ALTRAN_PROLOG_END_TAG
#
# @(#) 7d4c34b 43haes/usr/sbin/cluster/sa/sap/sbin/sap_powerha_cluster_connector.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM
##
## NAME: sap_powerha_cluster_connector
##
## PURPOSE:
##      OS connector to the SAP HA API in sapstartsrv.
##
## ARGUMENTS:
##      Displayed by "sap_powerha_cluster_connector help".
##
## OUTPUT:
##
## RETURNS:
##      Function specific, as defined by SAP: 0,1,2,3,4
##

. /usr/es/lib/ksh93/func_include

if [[ $SAP_CONNECTOR_VERSION != '' ]]; then
    typeset sap_ha_version="1.5"
else
    typeset sap_ha_version="2.0"
fi

#----------------------------------------------------------------------------
# Global Definitions
#----------------------------------------------------------------------------
if [[ -z $ODMDIR ]]; then
    ODMDIR=/usr/es/sbin/cluster/etc/objrepos/active/
fi
export ODMDIR

CLVTCMD=/usr/es/sbin/cluster/utilities/clvt
SAPSA_SBIN=/usr/es/sbin/cluster/sa/sap/sbin
REQUIREDERSPATCHLEVEL=152
SALOGFILEPATH=/var/hacmp/log
KSSLOGFILE="$SALOGFILEPATH/sapsa.log"
KLIB_HACMPLOG_ENTRY=sapsa.log
KLIB_OUTPUT_CONSOLE="true"
OSCON_LOG_FILE="/tmp/sap_powerha_script_connector_$USER.log"
OSCON_LogLevel=3
OSCON_OnOff_CS=1
OSCON_OnOff_ERS=1
OSCON_OnOff_App=1

typeset PROGRAM=${0##*/}
PATH=$PATH:/usr/es/sbin/cluster/sa/sbin
PATH=$PATH:/usr/es/sbin/cluster/utilities
PATH=$PATH:/bin:/usr/bin
export PATH

#############################################################
# simulate enum SAP_HA_CHECK_RESULT definition from sapha.h
SAP_HA_CHECK_SUCCESS=0
SAP_HA_CHECK_WARNING=1
SAP_HA_CHECK_ERROR=2

#################################################################
# simulate enum SAP_HA_CHECK_CATEGORY definition from sapha.h
SAP_HA_SAP_CONFIGURATION=0      # General SAP configuration check
SAP_HA_SAP_STATE=1              # General SAP state check
SAP_HA_HA_CONFIGURATION=2       # HA product specific configuration check
SAP_HA_HA_STATE=3               # HA product specific state check

##############################################
# Get input params
SAP_HA_INTERNALERROR=1
ERROR_INVALID_ARGS=2
FILE="________"
SID="XXX"
INO=999
ACT="NOTHING"
RES="________"
NODE="________"

function log
{
    # call: log <level> "<message>"
    (( $1 > ${OSCON_LogLevel} )) || {
        typeset DATE="$(date +"%y%m%d %H:%M:%S") "
        shift 1
        print "${DATE} $@" >> $OSCON_LOG_FILE
    }
}

#----------------------------------------------------------------------------
# Function:
#   sap_get_local_nodename
#
# Purpose:
#   get_local_nodename works only for the root user and RBAC-allowed users;
#   sap_get_local_nodename is used to get the local node name for the SAP user.
#
# Returns:
#   local-nodename      success
#   NULL                failure
#----------------------------------------------------------------------------
function sap_get_local_nodename
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset OP_SEP="$(cl_get_path -S)"
    typeset AIXODMDIR="/etc/objrepos"
    typeset HAODMDIR="/etc/es/objrepos"

    export ODMDIR=$HAODMDIR

    #
    : First, check to see if the nodename is in the HACMPcluster object
    #
    typeset nodename=""
    cllsclstr -N | read nodename
    typeset -i rc=$?
    (( $rc != 0 )) && exit $rc

    #
    : If the node name in HACMPcluster, $nodename,
    : matches a configured node, we are done.
    #
    if [[ -n $nodename && -n $(/usr/bin/odmget HACMPcluster | grep -w "nodename" | awk '{print $3}' | sed -e 's/\"//g') ]]
    then
        print -- "$nodename"
        exit 0
    fi

    nodelist=$(/usr/bin/odmget HACMPnode | grep -w "name" | uniq | awk '{print $3}' | sed -e 's/\"//g')
    #nodelist=${nodelist//\"/}

    typeset addr="" addr_v6=""
    for nodename in $nodelist
    do
        for addr in $(cllsif -J"$OP_SEP" -Si $nodename |\
                      grep ${OP_SEP}boot${OP_SEP} |\
                      cut -d"$OP_SEP" -f7)
        do
            #
            : Check the address, $addr, as a local IPv4 address
            #
            if [[ -n $(ODMDIR=$AIXODMDIR odmget -q"value = $addr AND attribute = netaddr" CuAt) ]]
            then
                print -- "$nodename"
                exit 0
            fi

            #
            : Check the address, $addr, as a local IPv6 address
            #
            if [[ -n $(ODMDIR=$AIXODMDIR odmget -q"value = $addr AND attribute = netaddr6" CuAt) ]]
            then
                print -- "$nodename"
                exit 0
            fi

            #
            : An IPv6 address may not show up in the CuAt ODM if it was
            : added by autoconf, so also check for the address, $addr,
            : in ifconfig.
            #
            for addr_v6 in $(ODMDIR=$AIXODMDIR LC_ALL=C ifconfig -a |\
                             grep -w inet6 | cut -d' ' -f2)
            do
                addr_v6=${addr_v6%%/*}      # Eliminate zone number
                if [[ $addr == $addr_v6 ]]; then
                    print -- "$nodename"
                    exit 0
                fi
            done
        done
    done

    #
    : Local node name not found
    #
    print
}

CMD_STRING="$@"

if (( $# == 0 )); then
    ACTION="help"
else
    ACTION=$1           # help, init, cpa, lsr, fra, lsn, hcc, gvi
    shift 1
    until (( $# == 0 )); do
        [[ -n $1 ]] && {
            option=$(echo $1 | awk -F "--" '{print $2}')
            shift
            OPTARG=$1
            HELP_OPTARG=$(echo $OPTARG | grep "\-\-help")
#           if [[ $HELP_OPTARG == "" ]]
#           then
            case $option in
                res)    # RG/APP name
                    if [[ $HELP_OPTARG == "" ]]
                    then
                        RES=$OPTARG
                        log 3 "INFO: Found res = '$RES'.\n"
                    else
                        log 3 "ERROR: Found res = '$OPTARG'.\n"
                        log 3 "ERROR: Required value for option '--res <RES>' is missing.\n"
                    fi
                    ;;
                out)    # file to write the output to (!! SAP-specific format required !!)
                    if [[ $HELP_OPTARG == "" ]]
                    then
                        FILE=$OPTARG
                        log 3 "INFO: Found out = '$FILE'.\n"
                    else
                        log 3 "ERROR: Found out = '$OPTARG'.\n"
                        log 3 "ERROR: Required value for option '--out <FILE>' is missing.\n"
                    fi
                    ;;
                act)    # start | stop
                    if [[ $HELP_OPTARG == "" ]]
                    then
                        ACT=$OPTARG
                        log 3 "INFO: Found act = '$ACT'.\n"
                    else
                        log 3 "ERROR: Found act = '$OPTARG'.\n"
                        log 3 "ERROR: Required value for option '--act <ACT>' is missing.\n"
                    fi
                    ;;
                sid)
                    if [[ $HELP_OPTARG == "" ]]
                    then
                        SID=$OPTARG
                        log 3 "INFO: Found sid = '$SID'.\n"
                    else
                        log 3 "ERROR: Found sid = '$OPTARG'.\n"
                        log 3 "ERROR: Required value for option '--sid <SID>' is missing.\n"
                    fi
                    ;;
                ino)
                    if [[ $HELP_OPTARG == "" ]]
                    then
                        INO=$OPTARG
                        log 3 "INFO: Found ino = '$INO'.\n"
                    else
                        log 3 "ERROR: Found ino = '$OPTARG'.\n"
                        log 3 "ERROR: Required value for option '--ino <INO>' is missing.\n"
                    fi
                    ;;
                nod)    # PowerHA node
                    if [[ $HELP_OPTARG == "" ]]
                    then
                        NODE=$OPTARG
                        log 3 "INFO: Found nod = '$NODE'.\n"
                    else
                        log 3 "ERROR: Found nod = '$OPTARG'.\n"
                        log 3 "ERROR: Required value for option '--nod <NODE>' is missing.\n"
                    fi
                    ;;
                *)      # error or warning msg
                    log 3 "WARNING: Found invalid argument '$option' in '$CMD_STRING'.\n"
                    ;;
            esac
#           fi
        }
        if [[ $# != 0 ]]; then
            if [[ $HELP_OPTARG == "" ]]
            then
                shift
            fi
        else
            log 3 "ERROR: Wrong parameter: Missing argument in '$CMD_STRING'.\n"
        fi
    done
fi      # end of if (( $# == 0 ))

function on_error
{
    typeset errc=$1
    echo "HACMPsap_connector: \n\
    value=0" | /usr/bin/odmchange -o HACMPsap_connector -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE"
    exit $errc
}

# The following variations can occur:
#   help
#   init
#   cpa --res RES --act ACT
#   lsr --out FILE --sid SID --ino INO | --dbhost HOST --dbtype TYPE
#   fra --res RES --act ACT [ --nod NODE ]
#   lsn --out FILE --res RES
#   hcc --out FILE --sid SID --ino INO
#   gvi --out FILE

case $ACTION in
    help | --help )
        name=$(basename ${0})
        echo "This code interlinks SAP and PowerHA. It is to be called by the sapstartsrv HA framework and is not intended to be called directly."
        echo "${name} help"
        echo "Print help message."
        echo "${name} init"
        echo "Initialization."
        echo "${name} cpa --res <RES> --act <ACT>"
        echo "Check for pending action: verification of where this request is coming from."
        echo "${name} lsr --out <FILE> --sid <SID> --ino <INO>"
        echo "List SAP resources: determine the resource group name for a given instance."
        echo "${name} fra --res <RES> --act <ACT> [--nod <NODE>]"
        echo "Fire resource action: request to start/stop the previously discovered instance."
        echo "${name} lsn --out <FILE> --res <RES>"
        echo "List SAP nodes: determine the valid nodes for a given instance."
        if [[ -z $SAP_CONNECTOR_VERSION ]]; then
            echo "${name} hcc --out <FILE> --sid <SID> --ino <INO>"
            echo "API2.0: HA Configuration Check"
            echo "${name} gvi --out <FILE>"
            echo "API2.0: get failover configuration information"
        fi
        ;;

    init )
        # Called when starting sapstartsrv.
        # The decision is taken whether to run with or without cluster connectivity.
        # Return codes:
        #   0: success
        #   1: no success / internal error
        #   2: not a clustered resource
        #   3: start in progress by PowerHA
        #   4: stop in progress by PowerHA
        #   5: BUFF_TOO_SMALL
        #   6: HA timeout
        log 3 "INFO: call function init.\n"

        # verify that logs can be written
        [[ -e ${OSCON_LOG_FILE} ]] || {
            log 3 "INFO: Log file does not exist. Create ${OSCON_LOG_FILE}.\n"
            touch ${OSCON_LOG_FILE}
            chmod 700 ${OSCON_LOG_FILE}
        }
        # if the log level is below 2, clean up previously created logs
        [[ ${OSCON_LogLevel} < 2 ]] && {
            rm ${OSCON_LOG_FILE}
            touch ${OSCON_LOG_FILE}
            chmod 700 ${OSCON_LOG_FILE}
        }

        # check for the minimum PowerHA level
        Min="7130"
        IS=$(lslpp -L cluster.es.server.rte | grep cluster.es.server | awk '{print $2}' | tr -d .)
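        # Example of the expected value (illustrative assumption, not taken from a
        # live system): with PowerHA 7.1.3.0 installed, lslpp reports the level
        # "7.1.3.0" in its second column; "tr -d ." reduces it to "7130", which the
        # arithmetic test below compares against Min.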
        (( $IS < $Min )) && {
            log 0 "ERROR: The installed PowerHA release does not support this. A minimum of ${Min} is required.\n"
            return 1
        }

        # check for SA resources configured
        # cannot use clodmget as this is called as the Oracle user.
        [[ $(/usr/bin/odmget -q "name=INSTANCE_NUMBERS" HACMPsap_connector 2>/dev/null | grep -c value | sort -u | sed -e 's/\"//g') != 0 ]] || {
            log 0 "ERROR: PowerHA has no SAP SA Resources configured.\n"
            return 1
        }

        # check that the cluster is started
        lssrc -ls clstrmgrES | grep "ST_NOT_CONFIGURED"
        [[ $? == 0 ]] && {
            log 1 "Node was never started. \n Return code 3.\n"
            return 3
        }
        lssrc -ls clstrmgrES | grep "ST_INIT"
        [[ $? == 0 ]] && {
            # 4: cluster commands are installed correctly but the cluster framework is not running (optional)
            log 1 "Node configured but not started.\n Return code 4.\n"
            return 4
        }
        return 0
        ;;

    gvi )
        #################################################################################
        # gvi (get version info)
        # This command prints the version info of the HA product and the SAP HA
        # interface, as well as a URL for the documentation and a potential SAP
        # certification note.
        #
        # Parameters
        #   - number of parameters: 2
        #   - all parameters are mandatory
        # Parameter names:
        #   --out FILE      Output file to be used.
        #
        # Output is returned as single lines in FILE.
        #
        # Returns:
        #   0 = success, a list of string values is written (one per line)
        #   1 = SAP_HA_INTERNALERROR   failure when accessing the script interface of
        #                              PowerHA; error messages are written to the error log
        #   2 = ERROR_INVALID_ARGS     invalid input parameters or output file could not
        #                              be written
        #
        if [[ $SAP_CONNECTOR_VERSION != '' ]]; then
            log 1 "Connector Version should be at least 2.0 for this function"
        else
            if [[ $FILE == "________" ]]; then
                log 3 "ERROR: No file name is passed as input. Required parameter '--out <FILE>' is missing.\n"
                return $ERROR_INVALID_ARGS
            fi
            log 3 "INFO: call gvi -out ${FILE}.\n"

            INFO_HACMP=$(lslpp -LI cluster.es.server.rte 2>/dev/null | grep 'cluster.es.server.rte' | awk '{if (index($3,"C")) print $2}')

            if [[ $0 != "/"* ]]; then
                help_dirname=$(dirname $0)
                if [[ $help_dirname == "." ]]
                then
                    dir_of_sap_powerha_cluster_connector=$(pwd)
                fi
            else
                # dir_of_sap_powerha_cluster_connector=`dirname $0`
                dir_of_sap_powerha_cluster_connector=""
                echo $dir_of_sap_powerha_cluster_connector $0
            fi

            out_HAPROD="IBM PowerHA System Mirror ${INFO_HACMP}"
            #out_HAPROD_SAP="IBM PowerHA System Mirror script connector- ${dir_of_sap_powerha_cluster_connector}/$0 ${sap_ha_version}"
            out_HAPROD_SAP="IBM PowerHA System Mirror script connector- sap_powerha_cluster_connector ${sap_ha_version}"
            out_HAPROD_DOC="See ${out_HAPROD} for AIX Reference Guide: http://www.redbooks.ibm.com/redbooks.nsf/RedpieceAbstracts/sg248167.html?Open"
            out_HASCRIPTCO_VERS="2"

            [[ -e ${FILE} ]] || {
                log 3 "WARNING: The file passed as input by SAP ${FILE} did not exist. Create it now.\n"
                touch ${FILE}       # create it if not there
                chmod 755 ${FILE}
            }
            echo $out_HASCRIPTCO_VERS >  ${FILE}
            echo $out_HAPROD          >> ${FILE}
            echo $out_HAPROD_SAP      >> ${FILE}
            echo $out_HAPROD_DOC      >> ${FILE}
            return 0
        fi
        ;;

    hcc )
        if [[ $SAP_CONNECTOR_VERSION != '' ]]; then
            log 1 "Connector Version should be at least 2.0 for this function"
        else
            if [[ $FILE == "________" ]]; then
                log 3 "ERROR: No file name is passed as input. Required parameter '--out <FILE>' is missing.\n"
                return $ERROR_INVALID_ARGS
            fi
            if [[ $INO == 999 ]]; then
                log 3 "ERROR: No INO is passed as input. Required parameter '--ino <INO>' is missing.\n"
                return $ERROR_INVALID_ARGS
            fi
            if [[ $SID == "XXX" ]]; then
                log 3 "ERROR: No SID name is passed as input. Required parameter '--sid <SID>' is missing.\n"
                return $ERROR_INVALID_ARGS
            fi
            log 3 "INFO: call hcc -out ${FILE} -ino ${INO} -sid ${SID}.\n"

            [[ -e ${FILE} ]] || {
                log 3 "WARNING: The file passed as input by SAP ${FILE} did not exist. Create it now.\n"
                touch ${FILE}       # create it if not there
                chmod 755 ${FILE}
            }

            # SAP_HA_HA_CONFIGURATION
            CHK_STATE=$SAP_HA_CHECK_SUCCESS
            CHK_CATEGORY=$SAP_HA_HA_CONFIGURATION
            APP_ID=$(/usr/bin/odmget -q "name=INSTANCE_NUMBERS and value=$INO" HACMPsap_connector 2>/dev/null | grep -w application_id | awk '{print $3}' | sed -e 's/\"//g')
            APP_ID=$(echo $APP_ID | grep $SID)
            if [[ $APP_ID == "" ]]
            then
                echo "ERROR: Function hcc called for a non clustered Instance."
                CHK_STATE=$SAP_HA_CHECK_ERROR
                CHK_DESCRIPTION="SAP Instance with SAPSID $SID and Instance No $INO is not clustered!"
                CHK_COMMENT="IBM PowerHA is not active for this SAP Instance $INO and $SID."
            else
                RG=$(/usr/bin/odmget -q "name = RESOURCE_GROUP and application_id=${APP_ID} " HACMPsap_connector 2>/dev/null | grep -w value | awk '{print $3}' | sed -e 's/\"//g')
                if [[ $RG == "" ]]
                then
                    echo "ERROR: Function hcc called for a non clustered Instance."
                    CHK_STATE=$SAP_HA_CHECK_ERROR
                    CHK_DESCRIPTION="SAP Instance with SAPSID '$SID', SAP Instance Number '$INO' and Application ID '$APP_ID' is not clustered!"
                    CHK_COMMENT="IBM PowerHA is not active for SAPSID '$SID' and Instance Number '$INO'."
                else
                    cl_name=$(clRGinfo -v $RG | grep -w "Cluster Name" | awk '{print $3}')
                    if [[ $cl_name != "" ]]
                    then
                        CHK_DESCRIPTION="IBM PowerHA manages Resource Group '$RG' of cluster '$cl_name'"
                        CHK_COMMENT="IBM PowerHA is active."
                    else
                        CHK_STATE=$SAP_HA_CHECK_ERROR
                        CHK_DESCRIPTION="IBM PowerHA is in error. Cluster name for RG '$RG' can't be retrieved."
                        CHK_COMMENT="IBM PowerHA is not active."
                    fi
                fi
            fi
            echo ${CHK_STATE}":"${CHK_CATEGORY}":"${CHK_DESCRIPTION}":"${CHK_COMMENT} > ${FILE}

            # SAP_HA_HA_STATE
            CHK_STATE=$SAP_HA_CHECK_SUCCESS
            CHK_CATEGORY=$SAP_HA_HA_STATE
            CHK_DESCRIPTION="Error in getting HA_STATE"
            CHK_COMMENT="IBM PowerHA is not active."
            if [[ $RG != "" ]]
            then
                ON_nodes=$(/usr/es/sbin/cluster/utilities/clRGinfo -c $RG | grep "ONLINE" | awk -F : '{print $3}')
                OFF_nodes=$(/usr/es/sbin/cluster/utilities/clRGinfo -c $RG | grep "OFFLINE" | awk -F : '{print $3}')
                ERROR_nodes=$(/usr/es/sbin/cluster/utilities/clRGinfo -c $RG | grep "ERROR" | awk -F : '{print $3}')
            else
                CHK_STATE=$SAP_HA_CHECK_ERROR
                CHK_DESCRIPTION="SAP Instance with SAPSID '$SID', SAP Instance Number '$INO' and Application ID '$APP_ID' is not clustered!"
                CHK_COMMENT="IBM PowerHA is not active for SAPSID '$SID' and Instance '$INO'."
                echo ${CHK_STATE}":"${CHK_CATEGORY}":"${CHK_DESCRIPTION}":"${CHK_COMMENT} >> ${FILE}
                return 0
            fi

            for Variable in $OFF_nodes
            do
                OFF_node_is_UP=$(lscluster -m $Variable | grep "State" | grep UP)
                if [[ $OFF_node_is_UP != "" ]]
                then
                    OFF_nodes_UP="$OFF_nodes_UP $Variable"
                fi
            done

            SAP_INSTANCE_NAME=$(/usr/bin/odmget -q "application_id=$APP_ID and name=INSTANCE_NAMES" HACMPsa_metadata | grep -w value | cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g')

            if [[ $ON_nodes != "" ]]
            then
                CHK_DESCRIPTION="Resource Group '$RG' for Instance '$SAP_INSTANCE_NAME' is ONLINE on node '$ON_nodes'."
                if [[ $OFF_nodes_UP != "" ]]
                then
                    CHK_DESCRIPTION="$CHK_DESCRIPTION RG '$RG' can failover to node(s) '$OFF_nodes_UP' "
                    CHK_COMMENT="RG $RG is fine!"
                else
                    CHK_DESCRIPTION="$CHK_DESCRIPTION RG '$RG' can't failover. "
                    CHK_COMMENT="RG '$RG' cannot failover!"
                    CHK_STATE=$SAP_HA_CHECK_WARNING
                fi
                echo ${CHK_STATE}":"${CHK_CATEGORY}":"${CHK_DESCRIPTION}":"${CHK_COMMENT} >> ${FILE}
                return 0
            else
                if [[ $ERROR_nodes != "" ]]
                then
                    CHK_STATE=$SAP_HA_CHECK_ERROR
                    CHK_DESCRIPTION="IBM PowerHA is active but Resource Group '$RG' for Instance '$SAP_INSTANCE_NAME' is in ERROR."
                    CHK_COMMENT="RG '$RG' cannot failover!"
                else
                    CHK_STATE=$SAP_HA_CHECK_ERROR
                    CHK_DESCRIPTION="IBM PowerHA is active but Resource Group '$RG' for Instance '$SAP_INSTANCE_NAME' is OFFLINE."
                    CHK_COMMENT="RG $RG cannot failover!"
                fi
                echo ${CHK_STATE}":"${CHK_CATEGORY}":"${CHK_DESCRIPTION}":"${CHK_COMMENT} >> ${FILE}
                return 0
            fi
            #echo $ON_nodes
            #echo $OFF_nodes
            #echo ${CHK_STATE}":"${CHK_CATEGORY}":"${CHK_DESCRIPTION}":"${CHK_COMMENT} >> ${FILE}
            return 0
        fi      # end of Version 2 code
        ;;

    cpa )
        # Return codes:
        #   0: success
        #   1: no success / internal error
        #   2: not a clustered resource
        #   3: start in progress by PowerHA
        #   4: stop in progress by PowerHA
        #   5: BUFF_TOO_SMALL
        #   6: HA timeout
        log 3 "INFO: call cpa ${RES} ${ACT}.\n"
        typeset -i curtime=$(date +%s)

        # Guess supplied RES as APP-name
        RG=$(clodmget -q "name=RESOURCE_GROUP and application_id=${RES}" -n -f value HACMPsap_connector 2>/dev/null)
        # Guess supplied RES as RG-name
        APP_ID=$(clodmget -q "name=RESOURCE_GROUP and value=${RES}" -n -f application_id HACMPsap_connector 2>/dev/null)

        [[ -z $RG ]] && [[ -z $APP_ID ]] && {
            log 0 "ERROR(cpa): Function cpa called for a non clustered Instance."
            return 2
        }

        # If the supplied RES is the APP-name, we get the RG value from the ODM, so re-initialize the variables.
        # Otherwise RES is the RG-name and we get APP_ID from the ODM.
        [[ -n $RG ]] && {
            APP_ID=${RES}
            RES=${RG}
        }

        # if we find no RG with the given name we cannot continue
        /usr/bin/odmget -q "name = RESOURCE_GROUP" HACMPsap_connector 2>/dev/null | grep -w ${RES}
        [[ $? == 0 ]] || {
            log 0 "ERROR: there is no RG with the name ${RES} running in this cluster."
            return 2            # RG name not found
        }

        # start ?????????????
        # clRGinfo can be run by any user.
        if [[ $ACT == "start" ]]
        then
            if [[ $(clRGinfo -s | grep -v OFFLINE | grep ${RES} | cut -f2 -d:) == "ACQUIRING" ]]
            then
                log 3 "INFO(cpa): Found pending action of start for ${RES}."
                return 3
            fi
            if [[ $(/usr/bin/odmget -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE" HACMPsap_connector 2>/dev/null | grep -w value | cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g') == "101" ]]
            then
                log 3 "INFO(cpa): We have identified an Admin Start for ${RES}."
                typeset -i STIN=$(/usr/bin/odmget -q "application_id=${APP_ID} and name=STABILIZATION_INTERVAL" HACMPsap_connector 2>/dev/null | grep -w value | cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g')
                (( curtime = curtime + STIN + 2 ))
                echo "HACMPsap_connector: \n\
                value=$curtime" | /usr/bin/odmchange -o HACMPsap_connector -q "application_id=${APP_ID} and name=END_STABLE_TIME"
                return 3
            fi
            if [[ $(/usr/bin/odmget -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE" HACMPsap_connector 2>/dev/null | grep -w value | cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g') == "103" ]]
            then
                log 3 "INFO(cpa): We have identified an Admin Start for ${RES}."
                typeset -i STIN=$(/usr/bin/odmget -q "application_id=${APP_ID} and name=STABILIZATION_INTERVAL" HACMPsap_connector 2>/dev/null | grep -w value | cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g')
                (( curtime = curtime + STIN + 2 ))
                echo "HACMPsap_connector: \n\
                value=$curtime" | /usr/bin/odmchange -o HACMPsap_connector -q "application_id=${APP_ID} and name=END_STABLE_TIME"
                return 3
            fi
        fi

        # stop ?????????????
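        # The stop checks below mirror the start checks above: a resource group in
        # RELEASING state, or a SAP_HA_GLUE_CODE of 0 (rewritten to 101 once the
        # cluster manager reports ST_STABLE), is reported back as a pending,
        # administrator-initiated stop.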
if [[ $ACT == "stop" ]] then if [[ $(clRGinfo -s | grep -v OFFLINE | grep ${RES} | cut -f2 -d:) == "RELEASING" ]] then log 3 "INFO(cpa): Found pending action of stop for ${RES}." return 4 fi if [[ $(/usr/bin/odmget -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE" HACMPsap_connector 2>/dev/null |grep -w value |cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g') == "0" ]] then lssrc -ls clstrmgrES | grep ST_STABLE [[ $? == 0 ]] && { echo "HACMPsap_connector: \n\ value=101" |/usr/bin/odmchange -o HACMPsap_connector -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE" echo "HACMPsap_connector: \n\ value=0" |/usr/bin/odmchange -o HACMPsap_connector -q "application_id=${APP_ID} and name=END_STABLE_TIME" } log 3 "INFO(cpa): We have identified a Admin Stop for ${RES}." return 4 fi fi #if the cluster is ST_STABLE no action is pending lssrc -ls clstrmgrES | grep ST_STABLE [[ $? == 0 ]] && { log 3 "INFO(cpa): Cluster is in ST_STABLE. No pending action." return 0 } # Migrate is currently not supported with PowerHA 7.1.3 This is target for next release # [[ $ACT = "migrate" ]] && [[ 1 == 1 ]] && return 0 log 3 "INFO(cpa): No pending action found for ${RES}. But some cluster activity is ongoing." return 0; ;; lsr ) #Return Codes #0: success; #1: no success / internal error; #2: Not a clustered Resource. #3: Start in progress by PowerHA #4: Stop in Progress by PowerHA #5: BUFF_TOO_SMALL #6: Ha Timeout. #Output Formatted query answer to given FILE. #Multiple rows (data sets) allowed even if typically there should only be one resource matching the query. #Each row / data set covers its values in a colon-separated (':') list. #Format: SID:INO:RES:GRP:CLUSTER_NODES typeset application_id="" log 3 "INFO: call lsr ${FILE} ${SID} ${INO}.\n" [[ -e ${FILE} ]] || { log 3 "ERROR: The file passed as input by SAP ${FILE} did not exist. Create it now.\n" touch ${FILE} #create it if not there chmod 755 ${FILE} } #determine application id for given SID and INO. Requires to be also able to handle multible SIDs /usr/bin/odmget -q "value=${SID} and name=SAPSYSTEMNAME" HACMPsap_connector 2>/dev/null | grep application_id | awk '{print $3}' |sed -e 's/\"//g'| sort -u | while read name do log 3 "INFO(lsr): Loop through all application IDs. Current ID: ${name}.\n" #we travers all application IDS for the given SID in this loop. #Now search the INO and by that having the correct application ID to determine the resource group /usr/bin/odmget -q "application_id=${name} and name=INSTANCE_NUMBERS" HACMPsap_connector 2>/dev/null | grep ${INO} [[ $? == 0 ]] && { application_id=${name} log 3 "INFO(lsr): Application ID for Instance number ${INO} is ${application_id}.\n" } [[ -z $application_id ]] && { continue } #find RG name for given application ID RES=$(/usr/bin/odmget -q "name=RESOURCE_GROUP and application_id=${application_id}" HACMPsap_connector 2>/dev/null |grep -w value | awk '{print $3}' |sed -e 's/\"//g') CLUSTER_NODES=$(/usr/es/sbin/cluster/utilities/clRGinfo -c $RES | awk -F : '{print $3}' | sed 's/\"//g' | tr '\n' ' ') # We get the RG,now checking this RG is part of local node or not ? 
            # If not, we do not add this entry to FILE for further processing.
            local_node_name=$(sap_get_local_nodename)
            for tnode in $CLUSTER_NODES
            do
                if [[ "$tnode" == "$local_node_name" ]]
                then
                    # add this RG entry to the FILE and return
                    echo "${SID}:${INO}:${RES}:${application_id}:${CLUSTER_NODES}" > ${FILE}    # we have only a single line in this file
                    return 0
                fi
            done
        done

        [[ -z $application_id ]] && {
            log 3 "ERROR: No application ID found for instance number ${INO} for SAP system ${SID}.\n"
            return 2            # we found no application ID
        }
        log 3 "ERROR: Cluster resource is not found.\n"
        return 2
        ;;

    fra )
        # Parameters - only NODE is optional.
        #   --res RES       Cluster resource to be controlled.
        #   --act ACT       Cluster action (start/stop/move) to be "fired".
        #   [ --nod NODE ]  For migrations this defines the destination node.
        # Return codes:
        #   0: success
        #   1: no success / internal error
        #   2: not a clustered resource
        #   3: start in progress by PowerHA
        #   4: stop in progress by PowerHA
        #   5: BUFF_TOO_SMALL
        #   6: HA timeout
        log 3 "INFO: call fra ${RES} ${ACT} ${NODE}.\n"
        typeset -i stabint
        typeset -i sleepint=0

        # Guess supplied RES as APP-name
        RG=$(clodmget -q "name=RESOURCE_GROUP and application_id=${RES}" -n -f value HACMPsap_connector 2>/dev/null)
        # Guess supplied RES as RG-name
        APP_ID=$(clodmget -q "name=RESOURCE_GROUP and value=${RES}" -n -f application_id HACMPsap_connector 2>/dev/null)

        [[ -z $RG ]] && [[ -z $APP_ID ]] && {
            log 0 "ERROR: Function fra called for a non clustered Instance."
            return 2
        }

        # If the supplied RES is the APP-name, we get the RG value from the ODM, so re-initialize the variables.
        # Otherwise RES is the RG-name and we get APP_ID from the ODM.
        [[ -n $RG ]] && {
            APP_ID=${RES}
            RES=${RG}
        }

        [[ $(/usr/es/sbin/cluster/utilities/get_local_nodename) == ${NODE} ]] && log 1 "WARNING: the node specified is the same as the local node name. A move will be ineffective."

        # Verify whether the HA API is disabled by configuration
        APP_TYPE=$(/usr/bin/odmget -q "name=INSTANCE_NAMES and application_id=${APP_ID}" HACMPsap_connector 2>/dev/null | grep -w value | awk '{print $3}')
        [[ $OSCON_OnOff_ERS == 0 ]] && [[ $APP_TYPE == @(ERS*) ]] && {
            log 1 "WARNING: The RG ${RES} is intentionally disabled in the OS-Connector script. To enable it, go to the change/show panels of your Smart Assist application."
            return 2
        }
        [[ $APP_TYPE == @(*SCS*) ]] && {
            [[ $OSCON_OnOff_CS == 0 ]] && {
                log 1 "WARNING: The RG ${RES} is intentionally disabled in the OS-Connector script. To enable it, go to the change/show panels of your Smart Assist application."
                return 2
            }
            # For CS instances we set NODE to empty in case it is an ERS-enabled instance.
            # This ensures we always follow the ERS (enqueue replication) RG.
            [[ $(/usr/bin/odmget -q "application_id=$APP_ID and name=IS_ERS_ENABLED" HACMPsap_connector 2>/dev/null | grep -w value | awk '{print $3}' | sed -e 's/\"//g') == 1 ]] && {
                log 3 "WARNING: We set the target node ${NODE} to empty. This instance is replicating its enqueues. The target node will be determined by the cluster solution."
                NODE=""
            }
        }
        [[ $OSCON_OnOff_App == 0 ]] && [[ $APP_TYPE != @(*SCS*|ERS*) ]] && {
            log 1 "WARNING: The RG ${RES} is intentionally disabled in the OS-Connector script. To enable it, go to the change/show panels of your Smart Assist application."
            return 2
        }

        CL_STATE=$(lssrc -ls clstrmgrES | grep "Current state" | cut -f2 -d : | tr -d ' ')
        [[ $CL_STATE == "ST_STABLE" ]] || {
            log 0 "ERROR: cluster cannot accept incoming directions from external sources. The cluster already initiated corrective actions. Cluster state: ${CL_STATE}."
            # the cluster cannot perform any action from an unstable state
            return 1
        }

        [[ ${ACT} == "start" ]] && {
            # Check that the filesystem and IP are up. Alternatively, this could be done using clRGinfo.
            APP2=$(echo $APP_TYPE | cut -f2 -d_)
            APP1=$(echo $APP_TYPE | cut -f1 -d_)
            [[ -n $APP2 ]] && {
                [[ $(ls /usr/sap/${SID}/${APP1} | grep -c work) > 0 ]] && {
                    log 0 "ERROR: stopping the RG action of ${ACT}. Filesystem is not mounted."
                    return 1
                }
            }
            [[ $(ls /usr/sap/${SID}/${APP2} | grep -c work) > 0 ]] && {
                log 0 "ERROR: stopping the RG action of ${ACT}. Filesystem is not mounted."
                return 1
            }

            # Check if the RG is online
            # Should add code to check if the RG is online locally only
            clRGinfo -s | grep $RES | grep ONLINE
            [[ $? != 0 ]] && {
                log 0 "ERROR: cannot move an offline RG. RG name: ${RES} is not ONLINE on any node."
                return 1
            }

            if [[ $(/usr/bin/odmget -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE" HACMPsap_connector 2>/dev/null | grep -w value | cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g') == "101" ]]
            then
                log 3 "INFO: calling Application start script for ${RES}."
                /usr/es/sbin/cluster/sa/sap/sbin/cl_sapStart -a ${APP_ID}
                [[ $? != 0 ]] && on_error 1
                stabint=$(/usr/bin/odmget -q "application_id=${APP_ID} and name=STABILIZATION_INTERVAL" HACMPsap_connector 2>/dev/null | grep -w value | cut -d= -f2 | awk '{print $1}')
                if [[ $stabint > $sleepint ]]
                then
                    sleepint=$stabint
                fi
                sleep $sleepint
            fi
            echo "HACMPsap_connector: \n\
            value=0" | /usr/bin/odmchange -o HACMPsap_connector -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE"
            return 0
        }

        [[ ${ACT} == "stop" ]] && {
            clRGinfo -s | grep $RES | grep ONLINE
            [[ $? != 0 ]] && {
                log 0 "ERROR: cannot move an offline RG. RG name: ${RES} is not ONLINE on any node."
                return 1
            }
            if [[ $(/usr/bin/odmget -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE" HACMPsap_connector 2>/dev/null | grep -w value | cut -d= -f2 | awk '{print $1}' | sed -e 's/\"//g') == "0" ]]
            then
                echo "HACMPsap_connector: \n\
                value=101" | /usr/bin/odmchange -o HACMPsap_connector -q "application_id=${APP_ID} and name=SAP_HA_GLUE_CODE"
                log 3 "INFO: calling Application stop script for ${RES}."
                /usr/es/sbin/cluster/sa/sap/sbin/cl_sapStop -a ${APP_ID}
                [[ $? == 0 ]] && return 0
            fi
            return 1
        }

        [[ ${ACT} == "migrate" ]] && {
            # It has been verified that the cluster state is ST_STABLE.
            # In case of a CS instance, NODE has been set to an empty string.
            # check if the RG is online
            clRGinfo -s | grep $RES | grep ONLINE
            [[ $? != 0 ]] && {
                log 0 "ERROR: cannot move an offline RG. RG name: ${RES} is not ONLINE on any node."
                return 1
            }
            Target_Node=""
            [[ -n $NODE ]] && Target_Node="-n ${NODE}"
            clRGmove -g $RES ${Target_Node} -m >> $OSCON_LOG_FILE
            [[ $? == 0 ]] && return 0
            return 1
        }
        return 2
        ;;

    lsn )
        # Return codes:
        #   0: success
        #   1: no success / internal error
        #   2: script notified bad parameters
        # Output: formatted query answer written to the given FILE.
        #   One single row (data set).
        #   Values are separated by a colon (':').
        #   RES        - Cluster resource identifier
        #   GRP        - Cluster group identifier
        #   CURR_NODE  - NODE which is currently running the resource
        #   PRIO_NODES - Prioritized list of NODEs (comma-separated) within the cluster that could run the resource
        #   Format: GRP:APP_ID:CURR_NODE:PRIO_NODES
        log 3 "INFO: call lsn ${FILE} ${RES}.\n"

        # Guess supplied RES as APP-name
        RG=$(clodmget -q "name=RESOURCE_GROUP and application_id=${RES}" -n -f value HACMPsap_connector 2>/dev/null)
        # Guess supplied RES as RG-name
        APP_ID=$(clodmget -q "name=RESOURCE_GROUP and value=${RES}" -n -f application_id HACMPsap_connector 2>/dev/null)

        [[ -z $RG ]] && [[ -z $APP_ID ]] && {
            log 0 "ERROR(lsn): Function lsn called for a non clustered Instance."
            return 2
        }

        # If the supplied RES is the APP-name, we get the RG value from the ODM, so re-initialize the variables.
        # Otherwise RES is the RG-name and we get APP_ID from the ODM.
        [[ -n $RG ]] && {
            APP_ID=${RES}
            RES=${RG}
        }

        CURR_NODE=$(LC_ALL=C /usr/es/sbin/cluster/utilities/clRGinfo -c ${RES} | grep "ONLINE" | awk -F : '{print $3}')
        REST_NODES=$(/usr/bin/odmget -q "group=${RES}" HACMPgroup 2>/dev/null | grep -w nodes | cut -d= -f2 | sed -e 's/\"//g')

        # We may need to rethink the following code if SAP expects CURR_NODE information only when the resource group is online.
        if [[ -z $CURR_NODE ]]
        then
            # The resource group is not online; get the primary node and consider it the current node.
            CURR_NODE=$(echo $REST_NODES | awk '{print $1}')
        fi
        echo "${RES}:${APP_ID}:${CURR_NODE}:${REST_NODES}" > ${FILE}
        ;;

    * )
        ;;
esac
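# -----------------------------------------------------------------------------
# Example invocations (SID "AB1", instance number "00", the output files and the
# resource group name "rg_sap_AB1_scs" are placeholders; normally sapstartsrv
# issues these calls through the SAP HA script connector interface):
#
#   sap_powerha_cluster_connector init
#   sap_powerha_cluster_connector lsr --out /tmp/lsr.out --sid AB1 --ino 00
#   sap_powerha_cluster_connector lsn --out /tmp/lsn.out --res rg_sap_AB1_scs
#   sap_powerha_cluster_connector cpa --res rg_sap_AB1_scs --act start
#   sap_powerha_cluster_connector fra --res rg_sap_AB1_scs --act start
# -----------------------------------------------------------------------------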