#!/bin/ksh93
# ALTRAN_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# Copyright (C) Altran ACT S.A.S. 2017,2021. All rights reserved.
#
# ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# 61haes_r721 src/43haes/usr/sbin/cluster/events/reconfig_resource_acquire.sh 1.44.7.9
#
# Licensed Materials - Property of IBM
#
# COPYRIGHT International Business Machines Corp. 1996,2016
# All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
# IBM_PROLOG_END_TAG
# @(#) 7d4c34b 43haes/usr/sbin/cluster/events/reconfig_resource_acquire.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

#########################################################################
# Including file containing SCSIPR functions
. /usr/es/sbin/cluster/events/utils/cl_scsipr_event_functions

#########################################################################
#
# Name:         isRGOwner
#
# Description:  Determines the state (primary, secondary, etc)
#               of the group of interest on the local node
#
# Arguments:    rg - resource group of interest
#
# Returns:      none - echoes the state to stdout
#
#########################################################################
function isRGOwner
{
    typeset PS4_FUNC=$0
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    typeset SEARCH_RG=$1
    typeset rg
    typeset state
    typeset node
    typeset cstate
    typeset startup_pref
    typeset fallover_pref
    typeset fallback_pref

    #
    : We should be using AUXILLIARY_ACTIONS / PRIMARY_ACTIONS to determine whether we\'re primary / secondary
    #
    if [[ -s ${TEMPPATH_RRI}clRGinfo.out ]]
    then
        cat ${TEMPPATH_RRI}clRGinfo.out | while IFS=: read rg state node cstate startup_pref fallover_pref fallback_pref
        do
            if [[ $SEARCH_RG == "$rg" && $node == $LOCALNODENAME ]]
            then
                if [[ $state == "ONLINE SECONDARY" ]]
                then
                    echo "secondary"
                    return
                fi
                if [[ $state == "ONLINE" ]]
                then
                    echo "primary"
                    return
                fi
            fi
        done
        echo "unknown"
    fi
}

#########################################################################
#
# Name:         rri_get_secondary_sustained
#
# Description:  Lists any resource groups in secondary sustained
#
# Arguments:    None
#
# Returns:      none - echoes the group name(s) to stdout
#
#########################################################################
function rri_get_secondary_sustained
{
    typeset PS4_FUNC=$0
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    typeset AUXILLIARY_ACTIONS_TMP=$AUXILLIARY_ACTIONS
    for group in $RESOURCE_GROUPS
    do
        echo $AUXILLIARY_ACTIONS_TMP | read action AUXILLIARY_ACTIONS_TMP
        if [[ $action == "S" ]]
        then
            echo $group
        fi
    done
}

#########################################################################
#
# Name:         rri_acquire_secondary
#
# Description:  Prepares the environment for a call to node_up_local
#               in order to acquire secondary instances of a
#               resource group
#
# Arguments:    resource group which will have secondary instances
#               brought online
#
# Returns:      exit status from node_up_local
#
#########################################################################
function rri_acquire_secondary
{
    typeset PS4_FUNC=$0
    [[ $VERBOSE_LOGGING == "high" ]] && set -x
    typeset STATUS=0

    #
    : Resource group to acquire
    #
    typeset RGNAME=$1

    #
    : Clear out the resource variables prior to setting any variables
    #
    for res in $RESOURCES $RRI_RESOURCES
    do
        export $res=
    done
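    #
    : Note: rri_acquire_variables returns 1 when this group has replicated
    : resources to bring online, so the non-zero test below drives the
    : secondary acquisition
    #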
    rri_acquire_variables "secondary" $RGNAME
    if (( $? != 0 ))
    then
        export PRINCIPAL_ACTION="NONE"
        export ASSOCIATE_ACTION="SUSTAIN"
        export AUXILLIARY_ACTION="ACQUIRE_SECONDARY"
        export VG_RR_ACTION="ACQUIRE"
        export FOLLOWER_ACTION="ACQUIRE_SECONDARY"
        export GROUPNAME=$RGNAME

        #
        : the primary is not up at this point, so assume no siblings
        #
        export SIBLING_NODES=""
        export SIBLING_NODES_BY_GROUP=""

        #
        : Now, bring the secondary instance resources on-line
        #
        if ! node_up_local
        then
            #
            : If ANY failure has occurred, this script should exit accordingly
            #
            cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $RGNAME. Manual intervention required." $PROGNAME $RGNAME
            STATUS=1
        fi
    fi

    return $STATUS
}

#########################################################################
#
# Name:         rri_acquire_variables
#
# Description:  Helper function used by rri_acquire_secondary to
#               set up the RRI acquire variables for the instances
#               of the specified resource group
#
# Arguments:    type of online (primary or secondary)
#               resource group of interest
#
# Returns:      1 if there are any actions (events) required for
#               this group
#
#########################################################################
function rri_acquire_variables
{
    #
    : Generate the RRI acquire variables for primary / secondary for the specified RG
    #
    typeset PS4_FUNC=$0
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    #
    : resource group to determine
    #
    typeset RGTYPE=$1           # primary or secondary depending on when we're called
    typeset RGNAME=$2
    typeset OWNER_TYPE=$(isRGOwner $RGNAME)
    typeset RRI_RUN=0

    for res in $RRI_RESOURCES
    do
        ACQUIRE_FILENAME=$TEMPPATH_RRI$RGNAME.ACQUIRE_RESOURCES.$res
        if [[ -f $ACQUIRE_FILENAME ]]
        then
            RESOURCES_TO_ACQUIRE=$(cat $ACQUIRE_FILENAME | sed -e s/\"//g)
            if [[ -n $RESOURCES_TO_ACQUIRE ]]
            then
                #
                : If we\'ve been asked to only report RGs for primary instances, then only report
                : primary instances
                #
                if [[ $OWNER_TYPE == $RGTYPE ]]
                then
                    export $res="$RESOURCES_TO_ACQUIRE"

                    #
                    : If this is a GMVG we need to specify the same list of resources for VOLUME_GROUP
                    #
                    if [[ $res == "GMVG_REP_RESOURCE" ]]
                    then
                        for vg_resource in $RESOURCES_TO_ACQUIRE
                        do
                            VG_ACQUIRE_FILENAME=${TEMPPATH}${RGNAME}.ACQUIRE_RESOURCES.CONCURRENT_VOLUME_GROUP
                            CONCURRENT_VG=""
                            if [[ -f $VG_ACQUIRE_FILENAME ]]
                            then
                                CONCURRENT_VG=$(cat $VG_ACQUIRE_FILENAME | sed -e s/\"//g | grep -w $vg_resource)
                            fi
                            if [[ -z $CONCURRENT_VG ]]
                            then
                                if [[ -z $VOLUME_GROUP ]]
                                then
                                    export VOLUME_GROUP="$vg_resource"
                                else
                                    export VOLUME_GROUP="$vg_resource $VOLUME_GROUP"
                                fi
                            else
                                echo "$vg_resource is a concurrent volume group"
                            fi
                        done
                        export FILESYSTEM="ALL"
                    fi
                    RRI_RUN=1
                fi
            fi
        fi
    done

    return $RRI_RUN
}

#########################################################################
#                                                                       #
# Name:         create_associate_groups                                 #
#                                                                       #
# Description:  Creates the list of associate resource groups           #
#                                                                       #
# Called by:                                                            #
#                                                                       #
# Calls to:                                                             #
#                                                                       #
# Arguments:    none                                                    #
#                                                                       #
# Returns:      none                                                    #
#                                                                       #
#########################################################################
function create_associate_groups
{
    typeset PS4_FUNC=$0

    ASSOCIATE_GROUPS=""
    RESOURCE_GROUPS=$(clodmget -f group -n HACMPgroup)
    for group in $RESOURCE_GROUPS
    do
        IN_SUSTAINED_GROUPS=$(echo $SUSTAINED_GROUPS | grep -w $group)
        IN_ACQUIRED_GROUPS=$(echo $ACQUIRED_GROUPS | grep -w $group)
        if [[ -z $IN_SUSTAINED_GROUPS && -z $IN_ACQUIRED_GROUPS ]]
        then
            NODE_LIST=$(clodmget -q"group = $group" -f nodes -n HACMPgroup | grep -w $LOCALNODENAME )
            if [[ -n $NODE_LIST ]]
            then
                ASSOCIATE_GROUPS="$ASSOCIATE_GROUPS $group"
            fi
        fi
    done
}
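#
# Note on create_associate_groups above: it queries HACMPgroup for every
# configured resource group; any group that is neither sustained nor acquired
# here, but lists this node among its participating nodes, is added to
# ASSOCIATE_GROUPS.
#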
#########################################################################
#                                                                       #
# Name:         reconfig_resource_acquire                               #
#                                                                       #
# Description:  This script is called when a reconfig                   #
#               resource event occurs. It acquires local resources.     #
#                                                                       #
# Called by:    cluster manager                                         #
#                                                                       #
# Calls to:                                                             #
#                                                                       #
# Arguments:    none                                                    #
#                                                                       #
# Returns:      0       success                                         #
#               1       failure                                         #
#               2       bad argument                                    #
#                                                                       #
#########################################################################
#                                                                       #
#                            Main start here                            #
#                                                                       #
#########################################################################

export PROGNAME=${0##*/}
export EVENT_TYPE=$PROGNAME     # Tell other scripts who called them
export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"

DCD="/etc/es/objrepos"
SCD="/usr/es/sbin/cluster/etc/objrepos/stage"
ACD="/usr/es/sbin/cluster/etc/objrepos/active"

set -a
eval $(ODMDIR=$ACD cllsparam -n $LOCALNODENAME)
set +a

[[ $VERBOSE_LOGGING == high ]] && {
    set -x
    version='1.44.7.9'
}

#
# these lists must match those in reconfig_resource_complete
#
RRI_RESOURCES="PPRC_REP_RESOURCE ERCMF_REP_RESOURCE SVCPPRC_REP_RESOURCE GMVG_REP_RESOURCE \
    SR_REP_RESOURCE TC_REP_RESOURCE GENXD_REP_RESOURCE"

RESOURCES="DISK VOLUME_GROUP CONCURRENT_VOLUME_GROUP FILESYSTEM FSCHECK_TOOL \
    RECOVERY_METHOD EXPORT_FILESYSTEM APPLICATIONS MOUNT_FILESYSTEM SERVICE_LABEL \
    INACTIVE_TAKEOVER SSA_DISK_FENCING TAKEOVER_LABEL NFS_HOST \
    AIX_CONNECTIONS_SERVICES COMMUNICATION_LINKS AIX_FAST_CONNECT_SERVICES \
    SHARED_TAPE_RESOURCES FORCED_VARYON \
    PRINCIPAL_ACTION ASSOCIATE_ACTION AUXILLIARY_ACTION VG_RR_ACTION \
    FOLLOWER_ACTION PPRC_REP_RESOURCE GMD_REP_RESOURCE \
    SR_REP_RESOURCE TC_REP_RESOURCE GENXD_REP_RESOURCE \
    ERCMF_REP_RESOURCE SVCPPRC_REP_RESOURCE VG_AUTO_IMPORT FS_BEFORE_IPADDR \
    OEM_VOLUME_GROUP OEM_FILESYSTEM EXPORT_FILESYSTEM_V4 STABLE_STORAGE_PATH \
    WPAR_NAME"

UDRESTYPE_LIST=$(cludrestype -l -h | awk ' /USERDEFINED/ { printf("%s ",$1); }' )
RESOURCES="$RESOURCES $UDRESTYPE_LIST"

#
# these are the working directories for DARE - the location must
# match the declaration in the other scripts involved
#
TEMPPATH="/var/hacmp/log/HACMP_RESOURCES/"
export TEMPPATH_RRI=/var/hacmp/log/HACMP_REP_RESOURCES/

#
: This will be the exit status seen by the Cluster Manager.
: If STATUS is not 0, event error will run.
: All lower-level scripts should pass status back to the caller.
: This will allow a Resource Group to be processed individually,
: independent of the status of another resource group.
#
STATUS=0
set -u

if (( $# != 0 ))
then
    cl_echo 1035 "Usage: $PROGNAME" $PROGNAME
    exit 2
fi

#
: Ensure that the ACD directory exists
#
if [[ ! -d $ACD ]]
then
    cl_log 1042 "$ACD does not exist" $ACD
    exit 1
fi

if [[ $PROGNAME == "reconfig_resource_acquire" ]]
then
    #
    : deal with integration with WLM
    : remove the flag file used to indicate reconfig_resources
    #
    rm -f /usr/es/sbin/cluster/etc/.hacmp_wlm_config_changed

    cl_wlm_reconfig reconfig_resources
    typeset -i WLM_STATUS=$?
    if (( $WLM_STATUS == 0 ))
    then
        #
        : WLM support is required by the new configuration
        #
        cl_wlm_start
        WLM_STATUS=$?
    fi

    #
    : if an error occurred in either cl_wlm_reconfig or cl_wlm_start
    : or if cl_wlm_reconfig found that WLM support is not needed by
    : the current cluster configuration, run cl_wlm_stop script to
    : clean up after any previous WLM management efforts
    #
    if (( $WLM_STATUS == 1 || $WLM_STATUS == 3 ))
    then
        cl_wlm_stop
        # return value will indicate an error, if one occurred
        # but we don't really care, so no reason to check
    fi

    : done with WLM processing

fi  #end if reconfig_resource_acquire
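#
# Note: DCD, SCD and ACD are the default, staging and active configuration
# directories used during a dynamic reconfiguration (DARE). When resource
# group dependencies are configured, the block below temporarily puts the
# staged configuration in place of the active one while clsetenvgrp runs,
# and the directories are restored immediately afterwards.
#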
#
: save the ACD, copy SCD to ACD
#
if [[ $RG_DEPENDENCIES == "TRUE" ]]
then
    if ! mv $ACD $TEMPPATH/
    then
        CMD="mv $ACD $TEMPPATH/ "
        cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
        exit 1
    fi
    if ! cp -rh $SCD $ACD 2>> /dev/null
    then
        CMD="cp -rh $SCD $ACD"
        cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
        exit 1
    fi
fi

#
: This will return a list of resource groups \(\$RESOURCE_GROUPS\) and an
: indication \(\$PRINCIPAL_ACTIONS\) whether they are to be acquired or
: sustained. For those that are to be sustained, the old and new list of
: resources are in temporary files
#
set -a
eval $(clsetenvgrp $LOCALNODENAME "reconfig_resource_acquire")
typeset -i RC=$?
set +a
if (( $RC != 0 ))
then
    STATUS=1
fi

if [[ $RG_DEPENDENCIES == "TRUE" ]]
then
    #
    : restore ACD and SCD
    #
    if ! rm -rf $SCD
    then
        CMD="rm -rf $SCD"
        cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
        exit 1
    fi
    if ! mv $ACD $SCD
    then
        CMD="mv $ACD $SCD"
        cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
        exit 1
    fi
    if ! mv $TEMPPATH/active $ACD
    then
        CMD="mv $TEMPPATH/active $ACD"
        cl_log 701 "NOTE: Received failed return code from command: $CMD\n" $CMD
        exit 1
    fi
fi

echo "RG_DEPENDENCIES = $RG_DEPENDENCIES"

export UPDATESTATD=0
rm -f /tmp/.RPCLOCKDSTOPPED

#
: Obtain the list of secondary instances for RGs that we should sustain.
: This function uses AUXILLIARY_ACTIONS to determine that information
#
SECONDARY_SUSTAINED_GROUPS=$(rri_get_secondary_sustained)

SUSTAINED_GROUPS=""
ACQUIRED_GROUPS=""
ASSOCIATE_GROUPS=""
export ACQUIRED_GROUPS

#
: Get the action for each resource group
#
PRINCIPAL_ACTIONS_TMP="$PRINCIPAL_ACTIONS"
for group in $RESOURCE_GROUPS
do
    echo $PRINCIPAL_ACTIONS_TMP | read action PRINCIPAL_ACTIONS_TMP
    case $action in
        S )
            #
            : This group continues to be on this node, though the actual resources
            : in it may have changed
            #
            SUSTAINED_GROUPS="$SUSTAINED_GROUPS $group"
            ;;
        A )
            #
            : This group must be newly acquired by this node
            #
            if [[ $RG_DEPENDENCIES == "FALSE" ]]
            then
                ACQUIRED_GROUPS="$ACQUIRED_GROUPS $group"
            fi
            ;;
        N )
            #
            : This group is processed on remote nodes
            #
            ASSOCIATE_GROUPS="$ASSOCIATE_GROUPS $group"
            ;;
    esac
done

export ODMDIR=$SCD

if [[ $PROGNAME == "reconfig_resource_acquire_secondary" ]]
then
    for group in $SECONDARY_SUSTAINED_GROUPS
    do
        #
        : Acquire secondary instances \(if any\) for this resource group
        #
        rri_acquire_secondary $group
        if (( $? != 0 ))
        then
            STATUS=1
        fi
    done
fi  #end if reconfig_resource_acquire_secondary

if [[ $PROGNAME == "reconfig_resource_acquire" ]]
then
    #
    : Acquire those resource groups that are not currently on this node
    #
    for group in $ACQUIRED_GROUPS
    do
        #
        : Set the Resource Environment variables to the list of
        : resources in this group in the SCD
        #
        set -a
        eval $(clsetenvres $group $PROGNAME)
        set +a

        #
        : Will acquire nfs filesystem in reconfig_resource_complete...
        #
        MOUNT_FILESYSTEM=""
        export GROUPNAME=$group

        #
        : If any enhanced concurrent volume groups are used as serial resources,
        : bring them on-line in passive mode
        #

        #
        : If SCSIPR is enabled for cluster, reserve the VGs of ACQUIRED_GROUPS
        #
        typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge)
        if [[ $SCSIPR_ENABLED == Yes ]]
        then
            typeset NewVG=$(clodmget -n -q "group=$group and name like *VOLUME_GROUP" -f value HACMPresource)

            #
            : Register and Reserve the disks for the Volume Group in the RG $group
            #
            cl_scsipr_dare_Reg_Res $NewVG
        fi

        cl_pvo -g $GROUPNAME
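        #
        : Note: a non-zero return from node_up_local below sets STATUS=1, which
        : becomes the exit status seen by the Cluster Manager and causes event
        : error processing to run
        #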
        #
        : Now, bring the resource group\'s resources on-line
        #
        if ! node_up_local
        then
            #
            : If ANY failure has occurred, this script should exit accordingly
            #
            cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
            STATUS=1
        fi
    done

    #
    : And now work with the sustained resource groups
    #
    for group in $SUSTAINED_GROUPS
    do
        if [[ $RG_DEPENDENCIES == "TRUE" ]]
        then
            for siblingGroup in $SIBLING_GROUPS
            do
                if [[ $siblingGroup == $group ]]
                then
                    set -a
                    eval $(clsetrepenv $siblingGroup)
                    set +a
                fi
            done
        fi

        RUN_SCRIPT=false
        for res in $RESOURCES
        do
            #
            : Clear the variable not set by clsetenvres
            #
            export $res=""
            VARIABLE=""

            #
            : Walk through the ACQUIRE_RESOURCES file, exporting the environment variables found
            : These files were created during the release phase
            #
            if [[ -f ${TEMPPATH}${group}.ACQUIRE_RESOURCES.${res} ]]
            then
                for variable in $(cat ${TEMPPATH}${group}.ACQUIRE_RESOURCES.${res} | cut -d'"' -f2)
                do
                    if [[ -n $variable ]]
                    then
                        #
                        : Handle the Site-Specific IP labels appropriately.
                        #
                        if [[ $res == "SERVICE_LABEL" ]]
                        then
                            SITENAME=$(clodmget -q "function=shared and ip_label=$variable" -f sitename -n HACMPadapter)
                            if [[ $SITENAME != "ignore" && -n $SITENAME ]]
                            then
                                if [[ $SITENAME != $LOCALSITENAME ]]
                                then
                                    continue
                                fi
                            fi
                        fi

                        RUN_SCRIPT="true"
                        if [[ -z $VARIABLE ]]
                        then
                            VARIABLE=${variable}
                        else
                            VARIABLE="${VARIABLE} ${variable}"
                        fi
                    fi
                done
            fi

            #
            : Pass these through for node_up_local to acquire
            #
            export $res="$VARIABLE"
        done

        if [[ $RG_DEPENDENCIES == "TRUE" ]]
        then
            # Export the RRI variables
            rri_acquire_variables "primary" $group
        fi

        #
        : Force the node_up_local if we\'ve found RRI resources to acquire
        #
        if (( $? == 1 ))
        then
            RUN_SCRIPT=true
        fi

        #
        : Acquire the resources picked up above
        #
        if [[ $RUN_SCRIPT == true ]]
        then
            #
            : Will mount NFS filesystem in reconfig_resource_complete...
            #
            MOUNT_FILESYSTEM=""
            export GROUPNAME=$group
            export PRINCIPAL_ACTION="ACQUIRE"

            #
            : If any enhanced concurrent volume groups are used as serial resources,
            : bring them on-line in passive mode. Quoting done to pass volume
            : groups and file systems as space separated lists, not individual
            : parameters.
            #
            check_parms=""
            if [[ -n $VOLUME_GROUP ]]       # pass volume groups if any
            then
                check_parms='-v "$VOLUME_GROUP"'
                vg_list=$VOLUME_GROUP
            fi
            if [[ -n $FILESYSTEM ]]         # pass file systems if any
            then
                check_parms=$check_parms' -f "$FILESYSTEM"'
                for filesys in $FILESYSTEM
                do
                    if [[ $filesys == ALL ]]
                    then
                        continue
                    fi

                    #
                    : Get the volume group corresponding to $filesys
                    #
                    vg_list=${vg_list:+$vg_list" "}$(cl_fs2disk -v $filesys)
                done
            fi

            if [[ -n $check_parms ]]        # if new ones
            then
                #
                : Set an initial fence height for all newly added volume groups
                #
                vg_list=$(echo $vg_list | tr ' ' '\n' | sort -u)
                for VG in $vg_list
                do
                    #
                    : Create or rebuild the fence group for $VG and set the
                    : initial height to 'rw'
                    #
                    cl_vg_fence_redo -c $VG rw
                done

                #
                : Now passively varyon the new volume groups
                #
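                #
                : Note: eval is used so the embedded quotes in check_parms take
                : effect and each list reaches cl_pvo as a single argument
                #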
                eval cl_pvo $check_parms
            fi

            #
            : Now, bring the resource group\'s resources on-line
            #
            if ! node_up_local
            then
                #
                : If ANY failure has occurred, this script should exit accordingly
                #
                cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $group. Manual intervention required." $PROGNAME $group
                STATUS=1
            fi
        fi
    done

    typeset RG_STATE_LOCAL=""
    typeset REPLICATED_RGS=""
    if [[ -n $(clodmget -n -f type HACMPrresmethods) ]]
    then
        #
        : Replicated resource methods are defined, check for resources
        #
        REPLICATED_RGS=$(clodmget -q "name like '*_REP_RESOURCE'" -f group -n HACMPresource)
    fi

    #
    : Process passive varyon of enhanced concurrent volume groups
    #
    for group in $ASSOCIATE_GROUPS
    do
        if [[ -n $(print $REPLICATED_RGS | /usr/bin/grep -w "$group") ]]
        then
            RG_STATE_LOCAL=$(isRGOwner $group)
            if [[ $RG_STATE_LOCAL == "secondary" ]]
            then
                continue
            fi
        fi

        export GROUPNAME=$group

        #
        : If any enhanced concurrent volume groups are used as serial resources,
        : bring them on-line in passive mode
        #
        cl_pvo -g $GROUPNAME
    done

    if (( STATUS == 0 )); then
        #
        : Call clmanageroha with adjust option for resource adjustment
        #
        clmanageroha -o adjust -l "" 3>&2
        STATUS=$?
    fi

fi  #end if reconfig_resource_acquire

exit $STATUS