#!/bin/ksh93 # ALTRAN_PROLOG_BEGIN_TAG # This is an automatically generated prolog. # # Copyright (C) Altran ACT S.A.S. 2019,2020,2021. All rights reserved. # # ALTRAN_PROLOG_END_TAG # # IBM_PROLOG_BEGIN_TAG # This is an automatically generated prolog. # # 61haes_r721 src/43haes/usr/sbin/cluster/events/reconfig_resource_release.sh 1.111.2.11 # # Licensed Materials - Property of IBM # # COPYRIGHT International Business Machines Corp. 1996,2016 # All Rights Reserved # # US Government Users Restricted Rights - Use, duplication or # disclosure restricted by GSA ADP Schedule Contract with IBM Corp. # # IBM_PROLOG_END_TAG # @(#) 5f56dbd 43haes/usr/sbin/cluster/events/reconfig_resource_release.sh, 726, 2147A_aha726, Mar 08 2021 04:44 PM ######################################################################### # # # Name: reconfig_resource_release # # # # Description: This script is called when a reconfig # # resource event. It releases local resources # # # # Called by: cluster manager # # # # Calls to: # # # # Arguments: none # # # # Returns: 0 success # # 1 failure # # 2 bad argument # # # ######################################################################### # Including file containing SCSIPR functions . /usr/es/sbin/cluster/events/utils/cl_scsipr_event_functions ######################################################################### # # Name: associate_check # # Description: During a DARE operation, an 'associate' group # for a node is one for which the node is a member # either before the DARE or after or both, but in # neither state does it own the resource group. # # Since the normal DARE resource processing does # nothing for such groups on a node, special # processing is required to handle passive varyons # associated with enhanced concurrent volume groups # used as serial resources. # # This routine determines what, if any, the volume # group access changes are for this node, and drives # the release or acquisition of passive varyons # # Input: Resource group name # # $LOCALNODENAME # # Returns: 0 # ######################################################################### function associate_check { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x GROUPNAME=$1 typeset new_group_list new_vg_list new_fs_list new_disk_list new_rawdisk_list typeset old_group_list old_vg_list old_fs_list old_disk_list old_rawdisk_list typeset release_vg_list release_fs_list release_disk_list release_rawdisk_list typeset acquire_vg_list acquire_fs_list acquire_disk_list acquire_rawdisk_list # : Find out what the new configuration looks like for this resource : group: nodes, volume groups and file systems # export ODMDIR=$SCD new_node_list=$(clodmget -q "group = $GROUPNAME" -f nodes -n HACMPgroup) new_vg_list=$(clodmget -q "name like '*VOLUME_GROUP' AND group = $GROUPNAME" -f value -n HACMPresource) new_fs_list=$(clodmget -q "name = FILESYSTEM AND group = $GROUPNAME" -f value -n HACMPresource) new_disk_list=$(clodmget -q "name=DISK and group = $GROUPNAME" -f value -n HACMPresource) new_rawdisk_list=$(clodmget -q "name=RAW_DISK and group = $GROUPNAME" -f value -n HACMPresource) # : Find out what the old configuration looks like for this resource : group: nodes, volume groups and file systems # export ODMDIR=$ACD old_node_list=$(clodmget -q "group = $GROUPNAME" -f nodes -n HACMPgroup) old_vg_list=$(clodmget -q "name like '*VOLUME_GROUP' AND group = $GROUPNAME" -f value -n HACMPresource) old_fs_list=$(clodmget -q "name = FILESYSTEM AND group = $GROUPNAME" -f value -n HACMPresource) old_disk_list=$(clodmget -q "name=DISK and group = $GROUPNAME" -f value -n HACMPresource) old_rawdisk_list=$(clodmget -q "name=RAW_DISK and group = $GROUPNAME" -f value -n HACMPresource) # : Check to see if this node is not in the new configuration. If so, : any passive varyons associated with this resource group must be : released. # if [[ $new_node_list != @(?(* )$LOCALNODENAME?( *)) ]] then release_vg_list=$old_vg_list release_fs_list=$old_fs_list release_disk_list=$old_disk_list release_rawdisk_list=$old_rawdisk_list acquire_vg_list="" acquire_fs_list="" acquire_disk_list="" acquire_rawdisk_list="" # : Check to see if this node is not in the old configuration. If so, : any serially used enhanced concurrent mode volume groups must be : passively varied on. # elif [[ $old_node_list != @(?(* )$LOCALNODENAME?( *)) ]] then release_vg_list="" release_fs_list="" release_disk_list="" release_rawdisk_list="" acquire_vg_list=$new_vg_list acquire_fs_list=$new_fs_list acquire_disk_list=$new_disk_list acquire_rawdisk_list=$new_rawdisk_list # : Else this node is in both the old and new configurations for : this resource group. A more detailed comparison of the old and : new volume group and file system lists are required. # else # : First, look for changed volume groups # if [[ $new_vg_list == $old_vg_list ]] then # : If the new and old volume group lists are the same, nothing : needs to be released or acquired # release_vg_list="" acquire_vg_list="" else # : The volume group list changes. Figure out whats different : between the new and old versions. # echo $new_vg_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.new_vg_list echo $old_vg_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.old_vg_list # : A volume group in the old list but not the new may need to be : varied off # release_vg_list=$(comm -1 -3 ${TEMPPATH}${GROUPNAME}.new_vg_list \ ${TEMPPATH}${GROUPNAME}.old_vg_list ) # : A volume group in the new list but not the old may need to be : passively varyied on # acquire_vg_list=$(comm -2 -3 ${TEMPPATH}${GROUPNAME}.new_vg_list \ ${TEMPPATH}${GROUPNAME}.old_vg_list ) fi # : Look for differences in file system configuration # if [[ $new_fs_list == $old_fs_list ]] then # : If the new and old file system lists are the same, nothing : needs to be released or acquired # release_fs_list="" acquire_fs_list="" else # : The file system list changes. Figure out whats different : between the old and new versions. # echo $new_fs_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.new_fs_list echo $old_fs_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.old_fs_list # : A file system in the old list but not the new may need to have : its owning volume group varied off # release_fs_list=$(comm -1 -3 ${TEMPPATH}${GROUPNAME}.new_fs_list \ ${TEMPPATH}${GROUPNAME}.old_fs_list ) # : A file system in the new list but not the old may need to have : its owning volume group passively varied on # acquire_fs_list=$(comm -2 -3 ${TEMPPATH}${GROUPNAME}.new_fs_list \ ${TEMPPATH}${GROUPNAME}.old_fs_list ) fi # : Look for differences in disk configuration # if [[ $new_disk_list == $old_disk_list ]] then # : New disk list and old disk list are same, : no disks to be acquired or released. # release_disk_list="" acquire_disk_list="" else # : The disk list changes. Figure out whats different : between the old and new versions. # echo $new_disk_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.new_disk_list echo $old_disk_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.old_disk_list release_disk_list=$(comm -1 -3 ${TEMPPATH}${GROUPNAME}.new_disk_list \ ${TEMPPATH}${GROUPNAME}.old_disk_list ) acquire_disk_list=$(comm -2 -3 ${TEMPPATH}${GROUPNAME}.new_disk_list \ ${TEMPPATH}${GROUPNAME}.old_disk_list ) fi # : Look for diffrences in raw disk configuration # if [[ $new_rawdisk_list == $old_rawdisk_list ]] then # : New disk list and old disk list are same, : no disks to be acquired or released. # release_rawdisk_list="" acquire_rawdisk_list="" else # : The disk list changes. Figure out whats different : between the old and new versions. # echo $new_rawdisk_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.new_rawdisk_list echo $old_rawdisk_list | tr ' ' '\n' > ${TEMPPATH}${GROUPNAME}.old_rawdisk_list release_rawdisk_list=$(comm -1 -3 ${TEMPPATH}${GROUPNAME}.new_rawdisk_list \ ${TEMPPATH}${GROUPNAME}.old_rawdisk_list ) acquire_rawdisk_list=$(comm -2 -3 ${TEMPPATH}${GROUPNAME}.new_rawdisk_list \ ${TEMPPATH}${GROUPNAME}.old_rawdisk_list ) fi fi # : If there are any volume groups or file systems that might need a : varyoff, call the routine to check if they are passively varied on, : and vary them off # if [[ -n $release_vg_list || -n $release_fs_list ]] then export VOLUME_GROUP=$release_vg_list export FILESYSTEM=$release_fs_list pvo_check fi # : If there are any volume groups or file systems that might need a : passive varyon, pass them to the routine to check and do so # check_parms="" if [[ -n $acquire_vg_list ]] then check_parms='-v "$acquire_vg_list"' export ACQUIRE_VG=yes fi if [[ -n $acquire_fs_list ]] then check_parms=$check_parms' -f "$acquire_fs_list"' export ACQUIRE_FS=yes fi # : Remove the PR Registration and reservation from all the VGs in $release_vg_list : Register and reserve all VGs in $acquire_vg_list. # typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge) if [[ $SCSIPR_ENABLED == Yes ]] then typeset clusterNodes=$(clodmget -q "object=COMMUNICATION_PATH" -f name -n HACMPnode) typeset VolGrp="" for VolGrp in $release_vg_list do # : Clear the PR Registration and Reservation from Volume Group, $VolGrp # clpr_clear_vg $VolGrp done for VolGrp in $acquire_vg_list do # : Register and reserve VGs, $acquire_vg_list # for noden in $clusterNodes do cl_rsh $noden /usr/es/sbin/cluster/events/utils/cl_scsipr_dare_reg_res $VolGrp done done typeset pvid="" for pvid in $release_disk_list do typeset hdisk=$(lspv -L | grep -w $pvid | awk '{print $1}') if [[ -n $hdisk ]];then clpr_clear $hdisk fi done for pvid in $acquire_disk_list do for noden in $clusterNodes do cl_rsh $noden /usr/es/sbin/cluster/events/utils/cl_scsipr_reg_res $pvid done done typeset uuid="" for uuid in $release_rawdisk_list do typeset hdisk=$(lspv -u | grep -w $uuid | awk '{print $1}') if [[ -n $hdisk ]];then clpr_clear $hdisk fi done for uuid in $acquire_rawdisk_list do for noden in $clusterNodes do cl_rsh $noden /usr/es/sbin/cluster/events/utils/cl_scsipr_reg_res $uuid done done fi if [[ -n $check_parms ]] then eval cl_pvo $check_parms fi } ######################################################################### # # Name: pvo_check # # Description: Check the volume groups and file systems that are # being released; if any are passively vary'd on, # vary them off # # Input: VOLUME_GROUP - list of volume groups being released # # FILESYSTEM - list of file systems being released # # Returns: 0 # ######################################################################### function pvo_check { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x typeset vg_list # : Pick up any passed volume groups # if [[ -n $VOLUME_GROUP ]] then vg_list=$VOLUME_GROUP else vg_list="" fi # : Check for any passed file systems # if [[ -n $FILESYSTEM ]] then # : For passed file systems, find the underlying volume groups : and add them to the list, since a resouce group can have : filesystems with volume groups as an implicit resource # for filesys in $FILESYSTEM do if [[ $filesys == ALL ]] # if ALL was specified then continue # must have had a volume group else # : Find the volume group for this filesystem, and add : it to the list # vg_list="$vg_list $(cl_fs2disk -v $filesys)" fi done fi if [[ -n $vg_list ]] # some volume groups to process then # : Strip out any duplicates # vg_list=$(echo $vg_list | tr ' ' '\n' | sort -u) # : Check each volume group to see if its passively varied on # for vg in $vg_list do if LC_ALL=C lsvg -L $vg 2>/dev/null | grep -i -q 'passive-only' then # : Vary off any such : First remove any read only fencing # cl_set_vg_fence_height -c $vg rw RC=$? : return code from volume group fencing is $RC if (( 0 != $RC )) then # : Log any error, but continue. If this is a real problem, : manual intervention may be needed. # cl_log 10511 "$PROGNAME: Volume group $vg fence height could not be set to read/write" $PROGNAME $vg rw fi /usr/es/sbin/cluster/utilities/cltime varyoffvg $vg RC=$? /usr/es/sbin/cluster/utilities/cltime : rc_varyoffvg = $RC # : If volume group fencing is in place, remove the fence : group since PowerHA is no longer managing this volume : group # cl_vg_fence_term -c $vg RC=$? : return code from volume group fencing is $RC if (( 0 != $RC )) then # : Log any error, but continue. If this is a real problem, : manual intervention may be needed. # cl_log 10511 "$PROGNAME: Volume group $vg fence height could not be set to read/only" $PROGNAME $vg ro fi fi done fi return 0 } ######################################################################### # # # Name: nfs_handler # # # # Description: Handles appropriate NFS-related resources # # # # Called by: # # # # Calls to: # # # # Arguments: none # # # # Returns: 0 success # # 1 failure # # # ######################################################################### function nfs_handler { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x # : First, filter the current set of mounted file systems by what is : actually mounted. This is to catch the case in which a stopped : resource gets started... # FINAL="" for current_fs in $(cat $TEMPPATH$cgroup.CURRENT.MOUNT_FILESYSTEM | cut -d'"' -f2) do if echo $current_fs | grep -q "\;/" then mnt=$(echo $current_fs | cut -f1 -d\;) else mnt="$current_fs" fi if mount | grep -q $mnt then FINAL="$FINAL $current_fs" fi done echo \"$FINAL\" > $TEMPPATH$cgroup.CURRENT.MOUNT_FILESYSTEM # : If the current NFS HOST and the new NFS HOST are the same, : then the scripts should do nothing for MOUNT FILESYSTEMS : which should also be the same, should mount those that : are new but not current, and should unmount those which : are current but not new. # if diff $TEMPPATH$cgroup.CURRENT.NFS_HOST.TMP $TEMPPATH$cgroup.NEW.NFS_HOST.TMP then for current_fs in $(cat $TEMPPATH$cgroup.CURRENT.MOUNT_FILESYSTEM | cut -d'"' -f2) do FOUND="false" for new_fs in $(cat $TEMPPATH$cgroup.NEW.MOUNT_FILESYSTEM | cut -d'"' -f2) do if [[ $current_fs == $new_fs ]] then FOUND="true" break fi done # : If in current but not in new, add to release script # if [[ $FOUND == "false" ]] then echo "$current_fs" >> $TEMPPATH$cgroup.RELEASE_RESOURCES.MOUNT_FILESYSTEM cat "$TEMPPATH$cgroup.CURRENT.NFS_HOST.TMP" > $TEMPPATH$cgroup.RELEASE_RESOURCES.NFS_HOST fi done for new_fs in $(cat $TEMPPATH$cgroup.NEW.MOUNT_FILESYSTEM | cut -d'"' -f2) do FOUND="false" for current_fs in $(cat $TEMPPATH$cgroup.CURRENT.MOUNT_FILESYSTEM | cut -d'"' -f2) do if [[ $new_fs == "$current_fs" ]] then FOUND="true" break fi done # : If in new but not in current, add to acquire script # if [[ $FOUND == "false" ]] then echo "$new_fs" >> $TEMPPATH$cgroup.ACQUIRE_RESOURCES.MOUNT_FILESYSTEM cat "$TEMPPATH$cgroup.NEW.NFS_HOST.TMP" > $TEMPPATH$cgroup.ACQUIRE_RESOURCES.NFS_HOST fi done else # : If the current NFS HOST and the new NFS HOST are different, : always unmount and then remount the MOUNT FILESYSTEM # cat "$TEMPPATH$cgroup.CURRENT.MOUNT_FILESYSTEM.TMP" > $TEMPPATH$cgroup.RELEASE_RESOURCES.MOUNT_FILESYSTEM cat "$TEMPPATH$cgroup.NEW.MOUNT_FILESYSTEM.TMP" > $TEMPPATH$cgroup.ACQUIRE_RESOURCES.MOUNT_FILESYSTEM cat "$TEMPPATH$cgroup.CURRENT.NFS_HOST.TMP" > $TEMPPATH$cgroup.RELEASE_RESOURCES.NFS_HOST cat "$TEMPPATH$cgroup.NEW.NFS_HOST.TMP" > $TEMPPATH$cgroup.ACQUIRE_RESOURCES.NFS_HOST fi for res in $RESOURCES do if [[ $res == "MOUNT_FILESYSTEM" || $res == "NFS_HOST" ]] then # : Clear the current environment variables # export $res="" VARIABLE="" # : Walk through the RELEASE_RESOURCES file, exporting the environment variables found # if [[ -f $TEMPPATH$cgroup.RELEASE_RESOURCES.$res ]] then for variable in $(cat $TEMPPATH$cgroup.RELEASE_RESOURCES.$res | cut -d'"' -f2) do if [[ -n $variable ]] then RUN_SCRIPT="true" if [[ -z $VARIABLE ]] then VARIABLE=${variable} else VARIABLE="${VARIABLE} ${variable}" fi fi done fi export $res="$VARIABLE" fi done } ######################################################################### # # Name: save_resources # # Description: For each resource group, create a file in the # working directory with the resources in that # group that need to be worked on # # Arguments: none # # Returns: none # ######################################################################### function save_resources { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x # : Go find the resources associated with the given resource group that : we have to process at this point. # set -a eval $(clsetenvres $group $PROGNAME_UNLINKED) set +a for res in $RESOURCES do # : Only create files for which clsetenvres creates variables # RES=\$"$res" set +u VAL="$(eval echo $RES)" set -u # : Create current resources files # if [[ -z $VAL ]] then echo "" > $TEMPPATH$group.$FILEEXTENSION.$res else # : Create resources files. If a resource exists, : save the resource value. If not, echo the variable name. # if [[ $res == NFS_HOST ]] then echo "$VAL" > $TEMPPATH$group.$FILEEXTENSION.$res else if [[ $res == TAKEOVER_LABEL ]] then # : look for the service label # clodmget -q "group = $group and name = SERVICE_LABEL" -f value -n HACMPresource > $TEMPPATH$group.$FILEEXTENSION.$res else # : Do report GMVGs as volume groups # if [[ $res == VOLUME_GROUP ]] then # : If the volume group is a GMVG do write it out to the temp file # typeset temp_val="" for value in $VAL do temp_val="$temp_val\n$value" done # : Dump the resources to the file # echo "$temp_val" > $TEMPPATH$group.$FILEEXTENSION.$res else clodmget -q "group = $group and name = $res" -f value -n HACMPresource > $TEMPPATH$group.$FILEEXTENSION.$res fi # VOLUME_GROUP / GMVG fi #TAKEOVER_LABEL fi #NFS_HOST fi #VAL done #RESOURCES } ######################################################################### # # Name: rri_get_secondary_sustained # # Description: Lists any resource groups in secondary sustained # # Arguments: None # # Returns: none - echo's the group name(s) to stdout # ######################################################################### function rri_get_secondary_sustained { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x typeset AUXILLIARY_ACTIONS_TMP=$AUXILLIARY_ACTIONS for group in $RESOURCE_GROUPS; do echo $AUXILLIARY_ACTIONS_TMP | read action AUXILLIARY_ACTIONS_TMP if [[ $action == "S" ]] then print $group fi done } ######################################################################### # # Name: save_rri_resources # # Description: For each resource group, create a file in the # working directory with the RRI resources in that # group that need to be worked on # # Arguments: none # # Returns: none # ######################################################################### function save_rri_resources { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x # : Find the list of RRI resources within the given resource group that : we have to save off to release # # : Export any variables that are produced from running clsetenvres # set -a eval $(clsetenvres $group $PROGNAME_UNLINKED) set +a for res in $RRI_RESOURCES do RES=\$"$res" set +u VAL="$(eval echo $RES)" set -u FILENAME=$TEMPPATH_RRI$group.RRI.$FILEEXTENSION.$res if [[ -z $VAL ]] then echo "" > $FILENAME else clodmget -q "group = $group and name = $res" -f value -n HACMPresource > $FILENAME fi done } ######################################################################### # # Name: isRGOwner # # Description: Determines the state (primary, secondary, etc) # of the group of interest on the local node # # Arguments: rg - resource group of interest # # Returns: none - echo's the state to stdout # ######################################################################### function isRGOwner { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x typeset SEARCH_RG=$1 typeset rg typeset state typeset node typeset cstate typeset startup_pref typeset fallover_pref typeset fallback_pref # : We should be using ASSOCIATE_ACTIONS / PRIMARY_ACTIONS to determine whether we are primary / secondary # if [[ -s ${TEMPPATH_RRI}clRGinfo.out ]] then cat ${TEMPPATH_RRI}clRGinfo.out | while IFS=: read rg state node cstate startup_pref fallover_pref fallback_pref do if [[ $SEARCH_RG == $rg && $node == $LOCALNODENAME ]]; then if [[ $state == "ONLINE SECONDARY" ]] then echo "secondary" return fi if [[ $state == "ONLINE" ]] then echo "primary" return fi if [[ $state == "UNMANAGED SECONDARY" ]] then echo "secondary" return fi if [[ $state == "UNMANAGED" ]] then echo "primary" return fi fi done echo "unknown" fi } ######################################################################### # # Name: rri_generate_acquire_release # # Description: Generate the ACQUIRE/RELEASE files for replicated resources # # Arguments: n/a # # Externals: # ######################################################################### function rri_generate_acquire_release { typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x for cgroup in $SUSTAINED_GROUPS $SECONDARY_SUSTAINED_GROUPS do OWNER_TYPE=$(isRGOwner $cgroup) export ODMDIR=$ACD # : Determine which resources, if any, have been removed. : Sort the current and new files, removing duplicates. # for res in $RRI_RESOURCES do RRI_CURRENT=$TEMPPATH_RRI$cgroup.RRI.CURRENT.$res RRI_NEW=$TEMPPATH_RRI$cgroup.RRI.NEW.$res # : if either of the files is present, make sure we have both # if [[ -f $RRI_CURRENT || -f $RRI_NEW ]] then touch $RRI_CURRENT touch $RRI_NEW fi if [[ -f $RRI_CURRENT && -f $RRI_NEW ]] then sort -u $RRI_CURRENT > $RRI_CURRENT.tmp sort -u $RRI_NEW > $RRI_NEW.tmp # : Compare these files and create two new files - release_resources and : acquire_resources All resources which are current but not in the new : configuration should be released. All resources which are new but not : in the current configuration should be acquired. # ACQUIRE_FILENAME=$TEMPPATH_RRI$cgroup.ACQUIRE_RESOURCES.$res RELEASE_FILENAME=$TEMPPATH_RRI$cgroup.RELEASE_RESOURCES.$res RESOURCES_TO_ACQUIRE=$(comm -1 -3 $RRI_CURRENT.tmp $RRI_NEW.tmp > $ACQUIRE_FILENAME && cat $ACQUIRE_FILENAME) RESOURCES_TO_RELEASE=$(comm -2 -3 $RRI_CURRENT.tmp $RRI_NEW.tmp > $RELEASE_FILENAME && cat $RELEASE_FILENAME) fi done done } ######################################################################### # # Name: rri_acquire_variables # # Description: Helper function to setup the RRI acquire variables # for the instances of the specified resource group # # Arguments: type of online (primary or secondary) # resource group of interest # # Returns: 1 if there are any actions (events) required for # this group # ######################################################################### function rri_release_variables { # : Generate the RRI release variables for primary / secondary for the specified RG # typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x # : resource group to determine # RGNAME=$1 OWNER_TYPE=$(isRGOwner $RGNAME) RRI_RUN=0 for res in $RRI_RESOURCES do RELEASE_FILENAME=$TEMPPATH_RRI$RGNAME.RELEASE_RESOURCES.$res if [[ -s $RELEASE_FILENAME ]] then RESOURCES_TO_RELEASE=$(cat $RELEASE_FILENAME | sed -e s/\"//g) if [[ -n $RESOURCES_TO_RELEASE ]] then if [[ $PROGNAME == "reconfig_resource_release_primary" && $OWNER_TYPE == "primary" ]] || [[ $PROGNAME == "reconfig_resource_release_secondary" && $OWNER_TYPE == "secondary" ]] then export $res="$RESOURCES_TO_RELEASE" # : if this is a GMVG we need to specify the same list of resources for VOLUME_GROUP # if [[ $res == "GMVG_REP_RESOURCE" ]] then if [[ -z $VOLUME_GROUP ]] then export VOLUME_GROUP="$RESOURCES_TO_RELEASE" else export VOLUME_GROUP="$RESOURCES_TO_RELEASE $VOLUME_GROUP" fi export FILESYSTEM="ALL" fi RRI_RUN=1 fi fi fi done return $RRI_RUN } ######################################################################### # # Name: rri_secondary_handler # # Description: Setup environment and call node_down_local to # handle the replicated resources for those groups # in secondary sustained state # # Arguments: none # # Returns: 1 if there is a failure in node_down_local, else 0 # ######################################################################### function rri_secondary_handler { # : Invoke node_down_local for any secondary instances # typeset PS4_FUNC=$0 [[ $VERBOSE_LOGGING == "high" ]] && set -x typeset STATUS=0 # : Set the list of volume groups to empty so we do not pick up any additional vgs from : the resource group that are not replicated resources such as GMVGs # VOLUME_GROUP= for cgroup in $SECONDARY_SUSTAINED_GROUPS do for res in $RESOURCES $RRI_RESOURCES do export $res= done rri_release_variables $cgroup if (( $? == 1 )) then export NOT_REMOVING_GROUP="TRUE" export VG_RR_ACTION="RELEASE" export ASSOCIATE_ACTION="SUSTAIN" export PRIMARY_ACTION="NONE" export AUXILLIARY_ACTION="RELEASE_SECONDARY" export FOLLOWER_ACTION="RELEASE_SECONDARY" export GROUPNAME=$cgroup if ! node_down_local then # : If ANY failure has occurred, this script should exit accordingly # cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $cgroup. Manual intervention required.\n" $PROGNAME $cgroup STATUS=1 fi fi done return $STATUS } ######################################################################### # # # Name: create_associate_groups # # # # Description: Creates the list of associate resource group # # That is, those groups which are neither # # acquired nor released by this node # # # # # # Called by: # # # # Calls to: # # # # Arguments: none # # # # Returns: none # # # ######################################################################### function create_associate_groups { typeset PS4_FUNC=$0 ASSOCIATE_GROUPS="" RESOURCE_GROUPS=$(clodmget -f group -n HACMPgroup) for group in $RESOURCE_GROUPS do IN_SUSTAINED_GROUPS="$(echo $SUSTAINED_GROUPS | grep -w $group)" IN_RELEASED_GROUPS="$(echo $RELEASED_GROUPS | grep -w $group)" if [[ -z $IN_SUSTAINED_GROUPS && -z $IN_RELEASED_GROUPS ]] then NODELIST=$(clodmget -q "group = $group" -f nodes -n HACMPgroup | grep -w $LOCALNODENAME) if [[ -n $NODELIST ]] then ASSOCIATE_GROUPS="$ASSOCIATE_GROUPS $group" fi fi done } ######################################################################### # # Main # ######################################################################### PROGNAME=${0##*/} PROGNAME_UNLINKED="reconfig_resource_release" export PATH="$(/usr/es/sbin/cluster//utilities/cl_get_path all)" eval export $(cllsparam -x) [[ $VERBOSE_LOGGING == high ]] && set -x [[ $VERBOSE_LOGGING == high ]] && version='1.111.2.11' echo "$PROGNAME called at $(date) with arguments: $*" DCD="/etc/es/objrepos" SCD="/usr/es/sbin/cluster/etc/objrepos/stage" ACD="/usr/es/sbin/cluster/etc/objrepos/active" set -a eval $(ODMDIR=$ACD cllsparam -n $LOCALNODENAME) set +a export EVENT_TYPE=$PROGNAME_UNLINKED # Tell other scripts who called them RRI_RESOURCES="PPRC_REP_RESOURCE ERCMF_REP_RESOURCE SVCPPRC_REP_RESOURCE GMVG_REP_RESOURCE \ SR_REP_RESOURCE TC_REP_RESOURCE GENXD_REP_RESOURCE" RESOURCES_TO_RELEASE="" FILENAME="" RESOURCES="DISK VOLUME_GROUP CONCURRENT_VOLUME_GROUP FILESYSTEM FSCHECK_TOOL \ RECOVERY_METHOD EXPORT_FILESYSTEM APPLICATIONS MOUNT_FILESYSTEM SERVICE_LABEL \ INACTIVE_TAKEOVER SSA_DISK_FENCING TAKEOVER_LABEL NFS_HOST \ AIX_CONNECTIONS_SERVICES COMMUNICATION_LINKS AIX_FAST_CONNECT_SERVICES \ SHARED_TAPE_RESOURCES FS_BEFORE_IPADDR FORCED_VARYON \ PRINCIPAL_ACTION ASSOCIATE_ACTION AUXILLIARY_ACTION VG_RR_ACTION \ SIBLING_NODES FOLLOWER_ACTION PPRC_REP_RESOURCE GMD_REP_RESOURCE \ ERCMF_REP_RESOURCE SVCPPRC_REP_RESOURCE VG_AUTO_IMPORT FS_BEFORE_IPADDR \ SR_REP_RESOURCE TC_REP_RESOURCE GENXD_REP_RESOURCE\ OEM_VOLUME_GROUP OEM_FILESYSTEM GMVG_REP_RESOURCE \ EXPORT_FILESYSTEM_V4 STABLE_STORAGE_PATH WPAR_NAME" UDRESTYPE_LIST=$(/usr/es/sbin/cluster/utilities/cludrestype -l -h | awk ' /USERDEFINED/ { printf("%s ",$1); }' ) RESOURCES="$RESOURCES $UDRESTYPE_LIST" NFS_RESOURCES="MOUNT_FILESYSTEM NFS_HOST" # # these are the working directories for DARE - the location must # match the declaration in the other scripts involved # export TEMPPATH="/var/hacmp/log/HACMP_RESOURCES/" export TEMPPATH_RRI=/var/hacmp/log/HACMP_REP_RESOURCES/ # : This will be the exit status seen by the Cluster Manager. : If STATUS is not 0, the Cluster Manager will enter reconfiguration : All lower-level scripts should pass status back to the caller. : This will allow a Resource Groups to be processed individaully, : independent of the status of another resource group. # integer STATUS=0 set -u if (( $# != 0 )) then cl_echo 1035 "Usage: $PROGNAME\n" $PROGNAME exit 2 fi # : Ensure that the ACD directory exists # if [[ ! -d $ACD ]] then cl_log 1042 "$ACD does not exist\n" $ACD exit 1 fi # : Ensure that the SCD directory exists # if [[ ! -d $SCD ]] then cl_log 1042 "$SCD does not exist\n " $SCD exit 1 fi # : Create the temporary directory # if [[ $PROGNAME != "reconfig_resource_release_secondary" ]] then rm -rf $TEMPPATH mkdir $TEMPPATH fi # : If the directory already exists, destroy the original and recreate # if [[ $PROGNAME == "reconfig_resource_release_primary" || $PROGNAME == "reconfig_resource_release" ]] then # : Only do this if we are the primary script and reconfig_resource_release # if [[ -d ${TEMPPATH_RRI} ]] then rm -rf ${TEMPPATH_RRI} fi mkdir -p ${TEMPPATH_RRI} /usr/es/sbin/cluster/utilities/clRGinfo -s > $TEMPPATH_RRI/clRGinfo.out fi # : Set the RESOURCE_GROUPS environment variable with the names : of all currently active Resource Groups on the local node, : via clsetenvgrp with the ODMDIR set to the Active Configuration : Directory. # export ODMDIR=$ACD rm -f /tmp/.RPCLOCKDSTOPPED # : This will return list of resource groups to be released/sustained # set -a eval $(clsetenvgrp $LOCALNODENAME $PROGNAME_UNLINKED) RC=$? set +a if (( $RC != 0 )) then STATUS=1 fi # : Obtain the list of secondary instances that will be sustained # SECONDARY_SUSTAINED_GROUPS=$(rri_get_secondary_sustained) SUSTAINED_GROUPS="" RELEASED_GROUPS="" ASSOCIATE_GROUPS="" export RELEASED_GROUPS PRINCIPAL_ACTIONS_TMP="$PRINCIPAL_ACTIONS" for group in $RESOURCE_GROUPS do echo $PRINCIPAL_ACTIONS_TMP | read action PRINCIPAL_ACTIONS_TMP case $action in S ) SUSTAINED_GROUPS="$SUSTAINED_GROUPS $group" ;; R ) if [[ $RG_DEPENDENCIES == "FALSE" ]] then RELEASED_GROUPS="$RELEASED_GROUPS $group" fi ;; N ) ASSOCIATE_GROUPS="$ASSOCIATE_GROUPS $group" ;; esac done # : Recreate the list of accosiate_groups to ensure it is correct # create_associate_groups export UPDATESTATD=0 # : Save the current and new lists of resources in groups that remain on this : node. The difference indicates what has to be released. # if [[ $PROGNAME == "reconfig_resource_release_primary" || $PROGNAME == "reconfig_resource_release" ]] then # : Perform this operation for both the primary instance, and for the secondary instance # for group in $SUSTAINED_GROUPS $ASSOCIATE_GROUPS do for xCD in $ACD $SCD do export ODMDIR=$xCD if [[ $xCD == $ACD ]] then export FILEEXTENSION="CURRENT" else export FILEEXTENSION="NEW" fi save_rri_resources save_resources done #ACD/SCD done # sustained and associate groups # : Generate the acquire / release files for replicated resources # rri_generate_acquire_release fi if [[ $PROGNAME == "reconfig_resource_release_secondary" ]] then if ! rri_secondary_handler then exit 1 fi exit 0 fi # : If this node is giving up a resource group, release all its owned : resources # for group in $RELEASED_GROUPS do # : Set the Resource Environment variables to the list of : resources in this group in the ACD # export ODMDIR=$ACD set -a eval $(clsetenvres $group $PROGNAME_UNLINKED) set +a export GROUPNAME=$group # : Release these resources, using the node_down_local script # export PRINCIPAL_ACTION="RELEASE" export VG_RR_ACTION="RELEASE" if ! node_down_local then # : If ANY failure has occurred, this script should exit accordingly # cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $cgroup. Manual intervention required.\n" $PROGNAME $group STATUS=1 fi UPDATESTATD=1 # : Check to see if any released volume groups were left passively varied : on. If so, vary them off # pvo_check done # : For groups that this node remains a member of, check to see if any NFS : related configuration has changed. # for cgroup in $ASSOCIATE_GROUPS do export ODMDIR=$ACD RUN_SCRIPT="false" for res in $NFS_RESOURCES do sort $TEMPPATH$cgroup.CURRENT.$res | uniq > $TEMPPATH$cgroup.CURRENT.$res.TMP sort $TEMPPATH$cgroup.NEW.$res | uniq > $TEMPPATH$cgroup.NEW.$res.TMP done # : Handle NFS special case resources MOUNT_FILESYSTEM and NFS_HOST # nfs_handler # : Release these resources # if [[ $RUN_SCRIPT == true ]] then export ODMDIR=$ACD # : If cascading resource and NFS_groupname TRUE, actually : removing resource. Otherwise, not removing - pass dummy # set +u eval TEMPNFS=$"NFS_$cgroup" set -u if [[ $TEMPNFS == "TRUE" ]] then export NOT_REMOVING_GROUP="FALSE" else export NOT_REMOVING_GROUP="TRUE" fi export GROUPNAME=$cgroup export PRINCIPAL_ACTION="RELEASE" export VG_RR_ACTION="RELEASE" node_down_local RC=$? if (( 0 != $RC )) then # : If ANY failure has occurred, this script should exit accordingly # cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $cgroup. Manual intervention required.\n" $PROGNAME $cgroup STATUS=1 fi UPDATESTATD=1 fi # : This node is in the membership list of this resource group either in : the new or the old configuration, or both. However, in neither does : it own the resource group. Still, check to see if passive varyons : need to be maintained. # associate_check $cgroup done # : If a resource group is in the current configuration and in the : new configuration, then only those specific resources in the : current configuration but not in the new configuration should : be unconfigured. # for cgroup in $SUSTAINED_GROUPS do export ODMDIR=$ACD RUN_SCRIPT="false" # : Determine which resources, if any, have been removed. : Sort the current and new files, removing duplicates. # for res in $RESOURCES do sort $TEMPPATH$cgroup.CURRENT.$res | uniq > $TEMPPATH$cgroup.CURRENT.$res.TMP sort $TEMPPATH$cgroup.NEW.$res | uniq > $TEMPPATH$cgroup.NEW.$res.TMP # : For the remainder of this loop, skip the MOUNT_FILESYSTEMS and : NFS_HOST resources - these will be handled in nfs_handler # if [[ $res == "MOUNT_FILESYSTEM" || $res == "NFS_HOST" ]] then # : create the empty files now so that future cat commands do not fail # echo "" > $TEMPPATH$cgroup.RELEASE_RESOURCES.$res echo "" > $TEMPPATH$cgroup.ACQUIRE_RESOURCES.$res continue fi # : Compare these files and create two new files - release_resources and : acquire_resources All resources which are current but not in the new : configuration should be released. All resources which are new but not : in the current configuration should be acquired. # comm -1 -3 $TEMPPATH$cgroup.CURRENT.$res.TMP $TEMPPATH$cgroup.NEW.$res.TMP > $TEMPPATH$cgroup.ACQUIRE_RESOURCES.$res comm -2 -3 $TEMPPATH$cgroup.CURRENT.$res.TMP $TEMPPATH$cgroup.NEW.$res.TMP > $TEMPPATH$cgroup.RELEASE_RESOURCES.$res # : MOUNT ALL FILESYSTEMS requires special processing to make sure : that if just converting from specifying specific FILESYSTEMS to : ALL or vice versa, that FILESYSTEMS are not umounted when they : will still be up after the reconfig. MOUNT ALL FILESYSTEMS also : requires special processing to make sure that if any VGs are : going down or coming up, their FILESYSTEMs get unmounted or : mounted, too. The logic here depends on the current order of : processing, with VGs processed before FSs # if [[ $res == "FILESYSTEM" ]] then FSCUR=$(cat $TEMPPATH$cgroup.CURRENT.FILESYSTEM.TMP | cut -d'"' -f2) FSNEW=$(cat $TEMPPATH$cgroup.NEW.FILESYSTEM.TMP | cut -d'"' -f2) VGCUR=$(cat $TEMPPATH$cgroup.CURRENT.VOLUME_GROUP.TMP | cut -d'"' -f2) VGNEW=$(cat $TEMPPATH$cgroup.NEW.VOLUME_GROUP.TMP | cut -d'"' -f2) if [[ $FSCUR != $FSNEW ]] then if [[ $FSCUR == "ALL" ]] then echo > "$TEMPPATH$cgroup.RELEASE_RESOURCES.FILESYSTEM" FSREL=$(lsvg -L -l $VGCUR | awk '$2 ~ /jfs2*$/ && $7 ~ /^\// {print $7}') for fs in $FSREL do found=0 for FS in $FSNEW do if [[ $fs == $FS ]] then found=1 break fi done if (( $found == 0 )) then echo "\"$fs\"" >> "$TEMPPATH$cgroup.RELEASE_RESOURCES.FILESYSTEM" fi done else if [[ $FSNEW == "ALL" ]] then echo > "$TEMPPATH$cgroup.RELEASE_RESOURCES.FILESYSTEM" for fs in $FSCUR do found=0 vg=$(cl_fs2disk -v $fs) for VG in $VGNEW do if [[ $vg == $VG ]] then found=1 break fi done if (( $found == 0 )) then echo "\"$fs\"" >> "$TEMPPATH$cgroup.RELEASE_RESOURCES.FILESYSTEM" fi done fi fi fi if [[ -s "$TEMPPATH$cgroup.RELEASE_RESOURCES.VOLUME_GROUP" && ! -s "$TEMPPATH$cgroup.RELEASE_RESOURCES.FILESYSTEM" ]] then if [[ $FSCUR == "ALL" ]] then cat $TEMPPATH$cgroup.CURRENT.FILESYSTEM.TMP > "$TEMPPATH$cgroup.RELEASE_RESOURCES.FILESYSTEM" fi fi if [[ -s "$TEMPPATH$cgroup.ACQUIRE_RESOURCES.VOLUME_GROUP" && ! -s "$TEMPPATH$cgroup.ACQUIRE_RESOURCES.FILESYSTEM" ]] then if [[ $FSNEW == "ALL" ]] then cat $TEMPPATH$cgroup.NEW.FILESYSTEM.TMP > "$TEMPPATH$cgroup.ACQUIRE_RESOURCES.FILESYSTEM" fi fi fi if [[ $res == "AIX_FAST_CONNECT_SERVICES" ]] then # : AIX_FAST_CONNECT_SERVICES has trouble with DAREing out : file/print shares if file/print shares remain in the resource : group. The acquire file is then the union of the output of : comm -1 -3 and comm -1 -2. The same is true for release... # comm -1 -2 $TEMPPATH$cgroup.CURRENT.$res.TMP $TEMPPATH$cgroup.NEW.$res.TMP >> $TEMPPATH$cgroup.RELEASE_RESOURCES.$res comm -1 -2 $TEMPPATH$cgroup.CURRENT.$res.TMP $TEMPPATH$cgroup.NEW.$res.TMP >> $TEMPPATH$cgroup.ACQUIRE_RESOURCES.$res fi # : Clear the current environment variables # export $res="" VARIABLE="" # : Walk through the RELEASE_RESOURCES file, exporting the environment variables found # for variable in $(cat $TEMPPATH$cgroup.RELEASE_RESOURCES.$res | cut -d'"' -f2) do if [[ -n $variable ]] then RUN_SCRIPT="true" if [[ -z $VARIABLE ]] then VARIABLE=${variable} else VARIABLE="${VARIABLE} ${variable}" fi fi done export $res="$VARIABLE" done # : Export the RRI variables # rri_release_variables $cgroup # : Force the node_down_local if we have found RRI resources to release # if (( $? == 1 )) then RUN_SCRIPT=true fi # : Handle NFS special case resources MOUNT_FILESYSTEM and NFS_HOST # nfs_handler # : Release these resources # if [[ $RUN_SCRIPT == "true" ]] then export ODMDIR=$ACD # : If cascading resource and NFS_groupname TRUE, actually : removing resource. Otherwise, not removing - pass dummy # set +u eval TEMPNFS=$"NFS_$cgroup" set -u if [[ $TEMPNFS == "TRUE" ]] then export NOT_REMOVING_GROUP="FALSE" else export NOT_REMOVING_GROUP="TRUE" fi export GROUPNAME=$cgroup export PRINCIPAL_ACTION="RELEASE" export VG_RR_ACTION="RELEASE" export RR_ACTION_FOR_GENXD="RELEASE" if ! node_down_local then # : If ANY failure has occurred, this script should exit accordingly # cl_log 650 "$PROGNAME: Failure occurred while processing Resource Group $cgroup. Manual intervention required.\n" $PROGNAME $cgroup STATUS=1 fi UPDATESTATD=1 # : Check to see if any released volume groups were left passively varied : on. If so, vary them off # if [[ -n $VOLUME_GROUP || -n $FILESYSTEM ]] then pvo_check fi fi done if [[ $RG_DEPENDENCIES == "TRUE" ]] then export ODMDIR=$ACD process_resources fi ## ## Note to maintainers: ## ## Typically, all resource and resource group processing is symmetrical. ## If a resource is brought down in reconfig_resource_release, it will be ## brought up in reconfig_resource_acquire, and vice-versa. In the case ## of WLM integration, however, this is not true. With WLM support, the ## user would require the old WLM configuration to be stopped after _all_ ## resource processing is complete in reconfig_resource_release, and then ## restarted just before any resource processing is done in ## reconfig_resource_acquire. This is not done, because the processing ## would be redundant. The WLM would be stopped here, and then immediately ## restarted in the next script. Thus, there is no WLM processing in ## the release script. All necessary processing is done in the acquire script. ## if [[ -f /tmp/.RPCLOCKDSTOPPED ]] # did we stop rpc.lockd? then # : If, earlier in this process, we stopped rpc.lockd, make sure its stopped, : and then start it up again - it has to be inoperative to be restartable # rm -f /tmp/.RPCLOCKDSTOPPED if ! LC_ALL=C lssrc -s rpc.lockd | grep -qw inoperative then # give it time to stop cl_echo 1111 "$PROGNAME: Waiting up to $COUNT seconds for rpc.lockd to go inactive\n" $PROGNAME for (( COUNT=0 ; COUNT < 60 ; COUNT-- )) do sleep 1 if LC_ALL=C lssrc -s rpc.lockd | grep -qw inoperative then # if its finally stopped break # we can quit fi set +x # don't really need to trace loop # more than once done [[ $VERBOSE_LOGGING == "high" ]] && set -x # start tracing again fi # : Now that its completely stopped, we can start it again # startsrc -s rpc.lockd fi # : check if backup profiles are configured and trigger cbm functionalities based on it # if [[ -n $(ODMDIR=$DCD clodmget -q "name=BACKUP_ENABLED" -f value HACMPresource) ]] || [[ -n $(ODMDIR=$ACD clodmget -q "name=BACKUP_ENABLED" -f value HACMPresource) ]]; then #Starting and stoping of remote_copy, based on the changes in remote backup profile when cluster is online. #This piece of code will not update the global variable "STATUS" as to not effect the PowerHA functionality. typeset ORIGINAL_ODMDIR=$ODMDIR typeset ORIGINAL_PRINCIPAL_ACTION=$(echo $PRINCIPAL_ACTION) typeset ORIGINAL_GROUPNAME="" set +u if [[ -n $GROUPNAME ]]; then typeset ORIGINAL_GROUPNAME=$(echo $GROUPNAME) fi set -u tempfile="/var/hacmp/log/remote_copy_acd_dcd_diff" xml="cloud_backup_configuration.xml" typeset -i return_value=0 sort $DCD/$xml $ACD/$xml | uniq -u > $tempfile 2>/dev/null if [[ -s $tempfile ]]; then #Get backup profiles from DCD data and check if there is any change in remote storage profiles DCD_backup_profiles=$(ODMDIR=$DCD LANG=C cl_cbm_list events=1 2>/dev/null) ACD_backup_profiles=$(ODMDIR=$ACD LANG=C cl_cbm_list events=1 2>/dev/null) for bp in $DCD_backup_profiles do DCD_profile_details=$(ODMDIR=$DCD LANG=C cl_cbm_list $bp events=1 2>/dev/null) return_value=$? if (( $return_value != 0 ));then dspmsg -s 43 scripts.cat 86 "$PROGNAME: Failed to get backup profile details of $bp.\n" $PROGNAME $bp continue fi backup_method=$(echo "$DCD_profile_details" | grep -w Backup_method | cut -f2 -d "=") backup_method=${backup_method// /} if [[ $backup_method == "remote_storage" ]];then #This is remote storage profile. Get ACD data and check if there is mismatch between DCD and ACD data, #invoke script "cl_cbm_remote_backup" to start or stop the consistency group accordingly, only if the resource group #which is mapped to this backup profile is "ONLINE". "ACQUIRE" is an indication to start consistency group #and "RELEASE" is to stop the consistency group. group_info=$(clRGinfo -s $bp | grep -w ONLINE) ACD_profile_details=$(ODMDIR=$ACD LANG=C cl_cbm_list $bp events=1 2>/dev/null) DCD_enable_backup=$(echo "$DCD_profile_details" | grep -w Enable_backup | cut -f2 -d "=") DCD_enable_backup=${DCD_enable_backup// /} #Script "cl_cbm_remote_backup" will log to hacmp.out in case of any error for all the below scenarios,hence logging #is not required, and it always returns success i.e "0" hence return value is not checked. if [[ -z $ACD_profile_details && -n $group_info && $DCD_enable_backup == "yes" ]];then #New profile is added, start the consistency group(s) defined for this profile. $(ODMDIR=$DCD PRINCIPAL_ACTION="ACQUIRE" GROUPNAME="$bp" cl_cbm_remote_backup >/dev/null 2>&1) elif [[ -n $ACD_profile_details && -n $group_info ]];then ACD_enable_backup=$(echo "$ACD_profile_details" | grep -w Enable_backup | cut -f2 -d "=") ACD_enable_backup=${ACD_enable_backup// /} if [[ $ACD_enable_backup == "no" && $DCD_enable_backup == "yes" ]];then #Backup profile exists in both DCD and ACD but the "Enable_backup" which was "no" is made "yes" so start consistency group(s). $(PRINCIPAL_ACTION="ACQUIRE" GROUPNAME="$bp" cl_cbm_remote_backup >/dev/null 2>&1) elif [[ $ACD_enable_backup == "yes" && $DCD_enable_backup == "no" ]];then #Backup profile exists in both DCD and ACD but the "Enable_backup" is made "no" so stop consistency group(s). $(PRINCIPAL_ACTION="RELEASE" GROUPNAME="$bp" cl_cbm_remote_backup >/dev/null 2>&1) fi fi fi #Remove the profile from ACD which exists in DCD. After all the iterations ACD_backup_profiles will be left out with profiles which are not in DCD. #We need to stop remote copy for these profiles. ACD_backup_profiles=${ACD_backup_profiles/$bp/} done #Stop the remote copy for the profiles which are removed by the user i.e which does not exist in DCD for bp in $ACD_backup_profiles do ACD_profile_details=$(ODMDIR=$ACD LANG=C cl_cbm_list $bp events=1 2>/dev/null) return_value=$? if (( $return_value != 0 ));then dspmsg -s 43 scripts.cat 86 "$PROGNAME: Failed to get backup profile details of $bp.\n" $PROGNAME $bp continue fi enable_backup=$(echo "$ACD_profile_details" | grep -w Enable_backup | cut -f2 -d "=") enable_backup=${enable_backup// /} backup_method=$(echo "$ACD_profile_details" | grep -w Backup_method | cut -f2 -d "=") backup_method=${backup_method// /} if [[ $backup_method == "remote_storage" && $enable_backup == "yes" ]];then $(ODMDIR=$ACD PRINCIPAL_ACTION="RELEASE" GROUPNAME="$bp" cl_cbm_remote_backup >/dev/null 2>&1) fi done export ODMDIR="$ORIGINAL_ODMDIR" export PRINCIPAL_ACTION="$ORIGINAL_PRINCIPAL_ACTION" if [[ -n $ORIGINAL_GROUPNAME ]]; then export GROUPNAME="$ORIGINAL_GROUPNAME" fi fi if (( $return_value == 0 ));then rm -rf $tempfile 2>/dev/null fi fi exit $STATUS