#!/bin/ksh93
# ALTRAN_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# Copyright (C) Altran ACT S.A.S. 2017,2021.  All rights reserved.
#
# ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# 61haes_r721 src/43haes/usr/sbin/cluster/events/utils/cl_pvo.sh 1.34.2.12
#
# Licensed Materials - Property of IBM
#
# Restricted Materials of IBM
#
# COPYRIGHT International Business Machines Corp. 2003,2016
# All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
# IBM_PROLOG_END_TAG
# @(#) 7d4c34b 43haes/usr/sbin/cluster/events/utils/cl_pvo.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM
###############################################################################

#================================================
# The following, commented line enforces coding
# standards when this file is edited via vim.
#================================================
# vim:tabstop=4:shiftwidth=4:expandtab:smarttab
#================================================

# Including file containing SCSIPR functions
. /usr/es/sbin/cluster/events/utils/cl_scsipr_event_functions

############################################################################
#
# Function:     mr_recovery
#
# Description:  Once a volume group has been vary'd on, it may still
#               have missing or removed disks.  This is particularly
#               likely after a forced varyon.
#
#               Look for such disks, and attempt to make them available
#               with the chpv command.
#
# Input:        Volume group name
#
# Output:       None
#
############################################################################
function mr_recovery
{
    typeset PS4_FUNC="mr_recovery"
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    typeset vg=$1
    typeset mr_disks
    typeset disk_list
    typeset hdisk

    missing_disks=$(LC_ALL=C lsvg -p $vg 2>/dev/null | grep -iw 'missing')
    if [[ -n $missing_disks ]]
    then
        #
        : There are missing physical volumes.  Try to get them back
        #
        disk_list=""
        print "$missing_disks" | while read hdisk rest
        do
            disk_list=${disk_list:+"$disk_list "}"${hdisk}"
        done

        #
        : First, try to make the volumes accessible at the device level
        #
        cl_disk_available -s -v $disk_list

        #
        : Then, try to vary on the volume group again, to get LVM to
        : recognize the disks.
        #
        varyonvg $vg_force_on_flag -n -c -P $vg
    fi

    removed_disks=$(LC_ALL=C lsvg -p $vg 2>/dev/null | grep -iw 'removed')
    if [[ -n $removed_disks ]]
    then
        #
        : There are removed physical volumes.  Sometimes,
        : these can be recovered by a chpv.  Give it a try.
        #
        disk_list=""
        print "$removed_disks" | while read hdisk rest
        do
            disk_list=${disk_list:+"$disk_list "}"${hdisk}"
        done

        if ! LC_ALL=C lsvg -L $vg 2>/dev/null | grep -q -i -w 'passive-only'
        then
            chpv -v a $disk_list
        fi
    fi
}
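
# Illustrative note (comments only, nothing executed here): mr_recovery keys
# off the PV STATE column of 'lsvg -p'.  Output along the following lines is
# assumed; exact column spacing may differ by AIX level:
#
#   <vg>:
#   PV_NAME    PV STATE    TOTAL PPs   FREE PPs    FREE DISTRIBUTION
#   hdisk2     active      542         0           00..00..00..00..00
#   hdisk3     missing     542         0           00..00..00..00..00
#
# A 'missing' disk is first recovered at the device level with
# cl_disk_available and then picked up again by LVM via another passive
# varyonvg; a 'removed' disk is re-enabled with 'chpv -v a' unless the volume
# group is only passively varied on.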

############################################################################
#
# Function:     cleanup_vg
#
# Description:  Attempt to unmount any file systems for a given volume
#               group, and then vary off the volume group.  This is used
#               when the volume group has been forced off (perhaps due
#               to lack of quorum) but left vary'd on.
#
# Input:        Volume group name
#
# Output:       Return code from any umount or varyoffvg failure
#
############################################################################
function cleanup_vg
{
    typeset PS4_FUNC="cleanup_vg"
    [[ $VERBOSE_LOGGING == "high" ]] && set -x
    set -u

    typeset VG=$1               # volume group name
    typeset hdisklist=$2        # space separated list of hdisks in volume group
    typeset dev                 # logical volume device
    typeset mount_pnt           # file system mount point
    typeset rest                # ignored fields
    typeset force_flag          # can force unmount a JFS2 filesystem
    integer rc                  # return code

    force_flag=""

    #
    : All mounted file systems
    #
    typeset mount_lst=$(mount | tail +3 | tr -s ' ' | cut -f1-4 -d' ')

    #
    : All logical volumes in $VG
    #
    typeset lv_lst=$(clodmget -q "name = $VG" -f dependency -n CuDep)

    #
    : Each of the V, R, M and F fields is padded to fixed length,
    : to allow reliable comparisons.  E.g., maximum VRMF is
    : 99.99.999.999
    #
    integer V R M F
    typeset -Z2 R               # two digit release
    typeset -Z3 M               # three digit modification
    typeset -Z3 F               # three digit fix
    integer jfs2_lvl=601002000  # minimum JFS2 level needed for forced unmount
    integer VRMF=0

    #
    : Here, try to figure out what level of JFS2 is installed
    #
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d':' | IFS=. read V R M F
    VRMF=$V$R$M$F               # get the JFS2 level

    if (( $VRMF >= $jfs2_lvl ))
    then
        #
        : JFS2 is at the level that supports forced unmount
        #
        force_flag="-f"
    fi

    #
    : Look for any mounted file systems still hanging around on $VG
    #
    for LV in $lv_lst
    do
        print "$mount_lst" | grep -w $LV | read dev mount_pnt fs_type rest
        [[ -z $fs_type ]] && continue   # Not mounted

        if [[ $fs_type == "jfs2" ]]
        then
            umount $force_flag $mount_pnt
        elif [[ $fs_type == "jfs" ]]
        then
            umount $mount_pnt
        fi
        rc=$?
        : Return code from umount $force_flag $mount_pnt is $rc

        if (( $rc != 0 ))
        then
            #
            : Quit on first failure because if $mount_pnt cannot be
            : unmounted, then varyoff $VG will surely fail
            #
            return $rc
        fi
        fs_type=""
        mount_pnt=""
    done

    #
    : Having gotten rid of any file systems, try to vary off $VG
    #
    if lsvg -o -L | grep -x $VG
    then
        varyoffvg_out=$(varyoffvg $VG 2>&1)
        RC=$?
        if (( $RC != 0 )) && [[ $varyoffvg_out == @(*0516-010*) ]]
        then
            #
            : LVM says $VG was varied off.  Treat as success
            #
            RC=0
        fi
    else
        #
        : We are in this routine because volume group operations
        : are not working.  We are here because the volume group
        : is not varied on.  A possible cause of the problem is
        : the presence of reserves on the disks.  Try to remove
        : any such.
        #
        cl_set_vg_fence_height -c $VG rw
        RC=$?
        if (( $ENODEV == $RC ))
        then
            #
            : In the event of a stale fence group, recreate it
            #
            cl_vg_fence_redo -c $VG rw
            RC=$?
        fi
        if (( $RC != 0 ))
        then
            #
            : Log any error, but continue.  If this is a real problem, the varyonvg will fail
            #
            rw=$(dspmsg -s 103 cspoc.cat 350 'read/write' | cut -f2 -d,)
            cl_log 10511 "$PROGNAME: Volume group $VG fence height could not be set to read/write" $PROGNAME $VG $rw
        fi

        #
        : Make the disks available at the device level
        #
        cl_disk_available -v -s $hdisklist

        #
        : Just in case LVM needs its state cleaned up
        #
        varyoffvg_out=$(varyoffvg $VG 2>&1)
        RC=$?
        if (( $RC != 0 )) && [[ $varyoffvg_out == @(*0516-010*) ]]
        then
            #
            : LVM says $VG was varied off.  Treat as success
            #
            RC=0
        fi
    fi

    return $RC
}
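
# Worked example for the VRMF comparison above (comments only, nothing is
# executed here).  A hypothetical bos.rte.filesystem level of 7.2.5.100 is
# read as V=7 R=2 M=5 F=100 and, after zero padding of R, M and F,
# concatenated to VRMF=702005100.  The threshold 601002000 is 6.1.2.0 padded
# the same way, so any level at or above 6.1.2.0 enables the '-f' (forced
# unmount) flag for JFS2 file systems.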

###############################################################################
#
# Name:         get_vg_mode
#
# Function:     Look at a set of disks to determine the volume group mode for
#               the volume group they are contained in.  Assumes that all
#               disks are in the same volume group, so can stop at the first
#               one that can be read.
#
# Arguments:    list of disks
#               vgid of presumed owning volume group
#               sync flag (may be null)
#
# Returns:      If the mode can be read from the VGDA, $mode is set.
#               Otherwise, it ends up as 0
#
###############################################################################
function get_vg_mode
{
    typeset PS4_FUNC="get_vg_mode"
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset vgid vg_name syncflag hdisklist
    typeset GROUP_NAME FORCED_VARYON
    integer TUR_RC=0
    integer vg_disks=0
    integer max_disk_test=0
    integer disk_tested=0

    hdisklist=$1
    vgid=$2
    vg_name=$3
    syncflag=${4:-""}

    if [[ -n $(ODMDIR=/etc/objrepos odmget -q "name=${vg_name} and attribute=conc_capable and value=y" CuAt) ]]
    then
        #
        : If LVM thinks that this volume group is concurrent capable, that
        : is good enough
        #
        mode=32
        return
    fi

    #
    : Look at each disk in the volume group, until we find one that
    : is readable - one for which lqueryvg can tell us what kind of
    : volume group it is.
    #
    for hdisk in $hdisklist
    do
        #
        : Attempt to read the volume group type directly from the disk
        #
        modevgid=$(cl_queryvg -V $hdisk)
        query_rc=$?
        if (( $query_rc == 0 ))
        then
            if [[ -n $modevgid ]]
            then
                print $modevgid | read mode diskvgid
                if [[ $vgid == $diskvgid ]]
                then
                    #
                    : Quit because we have managed to read the volume group mode
                    : from this disk, and this disk is part of the volume group
                    #
                    break               # Found the volume group type
                fi
            fi
        elif (( $query_rc == 2 ))
        then
            #
            : Volume group in use on other node
            #
            mode=0
            return
        fi

        #
        : While we were not able to read the disk, it is not clear that this
        : is due to the volume group being in use by another cluster node.
        : It could be due to a hardware failure at the disk, enclosure, or
        : path level.
        #
        if (( $max_disk_test == 0 ))
        then
            #
            : Figure out here how many disks to test before giving up.
            : If forced varyon is an option, try up to all of them.
            : If forced varyon is not allowed, try up to a quorum.
            #
            vg_disks=$(print $hdisklist | wc -w)
            GROUP_NAME=$(clodmget -q "name = VOLUME_GROUP and value = $vg_name" -f group -n HACMPresource)
            if [[ -n $GROUP_NAME ]] ; then
                FORCED_VARYON=$(clodmget -q "group = $GROUP_NAME and name = FORCED_VARYON" -f value -n HACMPresource)
            fi
            if [[ $FORCED_VARYON == "true" ]]
            then
                max_disk_test=$vg_disks
            else
                max_disk_test=$(( $vg_disks/2 + 1 ))
            fi
        fi

        (( ++disk_tested ))
        if (( $disk_tested > $max_disk_test ))
        then
            #
            : We have tried enough disks, and none of them have worked.
            : Give up on this volume group.
            #
            break
        fi
    done

    if (( $mode == 32 ))
    then
        #
        : We are here because a query of the volume group attributes in CuAt
        : does not say that this volume group is concurrent capable, but the
        : actual content of the disks says that it is.  Try to update the ODM
        : information.  A redefinevg is needed before importvg to get the VGDA
        : and ODM in sync.
        #
        redefinevg -d $hdisk ${vg_name}
        importvg -L ${vg_name} -R $hdisk
    fi
}
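
# Worked example for the disk probing limit in get_vg_mode above (comments
# only).  For a hypothetical volume group with 4 disks: if FORCED_VARYON is
# "true", all 4 disks may be probed; otherwise max_disk_test = 4/2 + 1 = 3,
# i.e. a bare majority (quorum) of the disks, before the volume group is
# given up on.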

###############################################################################
#
# Name:         varyonp
#
# Function:     Try to vary on the volume group in passive concurrent mode.
#               If that fails, use a forced varyon if appropriate.
#
# Arguments:    volume group name
#               list of hdisks in volume group
#               sync flag (may be null)
#
# Returns:      0
#
###############################################################################
function varyonp
{
    typeset PS4_FUNC="varyonp"
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    integer NOQUORUM=20         # varyonvg return code for failure due to lack of quorum
    integer rc=0                # return code

    #
    : Pick up passed parameters: volume group and sync flag
    #
    typeset syncflag hdisklist vg
    vg=$1
    hdisklist=$2
    syncflag=${3:-""}

    #
    : Make sure the volume group is not fenced.  Varyon requires read write
    : access.
    #
    cl_set_vg_fence_height -c $vg rw
    RC=$?
    if (( $ENODEV == $RC ))
    then
        #
        : In the event of a stale fence group, recreate it
        #
        cl_vg_fence_redo -c $vg rw
        RC=$?
    fi
    : Return code from volume group fencing for $vg is $RC
    if (( 0 != $RC ))
    then
        #
        : Note any problem with trying to set the fencing height to read/write,
        : but allow operations to continue.  If this is a real problem, the
        : varyon will fail, and will be handled below.
        #
        rw=$(dspmsg -s 103 cspoc.cat 350 'read/write' | cut -f2 -d,)
        cl_log 10511 "$PROGNAME: Volume group $vg fence height could not be set to read/write" $PROGNAME $vg $rw
    fi

    #
    : Try to vary on the volume group in passive concurrent mode
    #
    varyonvg $syncflag -c -P $O_flag $vg
    rc=$?
    if (( $rc != 0 ))
    then
        #
        : If gsclvmd was stopped, or the volume group lost quorum,
        : it would have been forced off.  This could have caused
        : the above failure.  Try to recover by varying off and on.
        #
        if cleanup_vg $vg "$hdisklist"
        then
            #
            : On successful clean up and vary off, try again
            #
            varyonvg $syncflag -c -P $O_flag $vg
            rc=$?
        else
            #
            : Varyoff failed, most likely because the logical volumes
            : were not closed.  Try later if the force option is enabled.
            #
            /bin/dspmsg scripts.cat 28 "$PROGNAME: Failed varyoff of $vg\n" $PROGNAME $vg
        fi
    fi
    : exit status of varyonvg $syncflag -c -P $O_flag $vg is: $rc

    if (( $rc == $NOQUORUM ))
    then
        #
        : If the varyon failed due to lack of quorum, go see if there is
        : at least one copy of the data available on the disks that can
        : be read, making it worthwhile to try a forced varyon.
        #
        if cl_mirrorset $vg
        then
            #
            : One copy of the data is available - try using force
            #
            vg_force_on_flag='-f'
            varyonvg -f $syncflag -c -P $O_flag $vg
            rc=$?
            : exit status of varyonvg -f $syncflag -c -P $O_flag $vg is $rc
        fi
    fi

    #
    : If varyon was ultimately unsuccessful, note the error
    #
    if (( $rc != 0 )) ; then
        cl_log 296 "$PROGNAME: Failed to vary on volume group $vg in passive mode" $PROGNAME $vg
    else
        #
        : If varyonvg was successful, try to recover
        : any missing or removed disks
        #
        mr_recovery $vg
    fi

    #
    : Restore the fence height to read only, for passive varyon
    #
    cl_set_vg_fence_height -c $vg ro
    RC=$?
    : Return code from volume group fencing for $vg is $RC
    if (( 0 != $RC ))
    then
        #
        : Note any problem with trying to set the fencing height to read only,
        : but allow operations to continue.
        #
        ro=$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)
        cl_log 10511 "$PROGNAME: Volume group $vg fence height could not be set to read only" $PROGNAME $vg $ro
    fi

    return 0
}
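
# Summary of the recovery ladder implemented by varyonp above (descriptive
# comment only): 1) lower the fence to read/write and attempt a passive
# concurrent varyon; 2) on failure, run cleanup_vg to unmount and vary off,
# then retry; 3) if the retry fails specifically for lack of quorum (rc 20)
# and cl_mirrorset indicates at least one complete copy of the data is
# readable, retry with 'varyonvg -f'; 4) on success, let mr_recovery chase
# missing or removed disks; finally the fence is restored to read only.  The
# caller always gets 0 back, since a normal varyon will be tried later during
# event processing anyway.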

###############################################################################
#
#   Name:           cl_pvo - Passive Varyon
#
#   Function:       Find the volume groups that are enhanced concurrent mode
#                   volume groups, but are used serially by resource groups
#                   that this node participates in.  Vary them on in passive
#                   mode.
#
#   Arguments:      If no input, process all volume groups in resource groups
#                   that this node is a member of, or
#
#                   -g <resource group>
#                       process all volume groups in the given resource
#                       group, or
#
#                   -v "<volume group list>"
#                       process all volume groups in the list, and/or
#
#                   -f "<file system list>"
#                       process all volume groups that own the file
#                       systems in the list
#
#                   The current node name is picked up from $LOCALNODENAME
#                   HACMP resource group information is read from HACMPresource
#
#   Environment:    LOCALNODENAME, PATH, EMULATE, PRE_EVENT_MEMBERSHIP
#
#   Returns:        0 - If validly invoked, this script always returns a success
#                       indication.  This is because even if a passive varyon
#                       fails, a normal varyon will be tried later during event
#                       processing.
#
#                   1 - Invoked with an invalid option
#
#   Questions?  Comments?  Expressions of Astonishment?  mailto:hafeedbk@us.ibm.com
#
###############################################################################

PROGNAME=${0##*/}
export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
if [[ $VERBOSE_LOGGING == "high" ]]
then
    if [[ -z $PS4 || $PS4 == '+ ' ]]
    then
        eval export $(cllsparam -x)
    fi
    set -x
    version='1.34.2.12'
fi
PS4_TIMER="true"

integer rc=0                # return code
integer mode=0              # what kind of volume group
                            #  0 => non-concurrent or RAID concurrent
                            #  1 => SSA or 9333
                            # 16 => SSA or 9333
                            # 32 => enhanced concurrent mode
integer ENODEV=19           # standard fence failure return code
vg_force_on_flag=""         # force varyon allowed

#
: Pick up any passed options
#
export rg_list=""           # list of resource groups
vg_list=""                  # list of volume groups
fs_list=""                  # list of file systems
all_vgs_flag=""             # invoked at node up for all ECM VGs
if [[ -z $* ]]              # If no parameters
then
    all_vgs_flag="true"     # Then must be for all volume groups
fi

while getopts ":g:v:f:" option ; do
    case $option in
        g ) : passed a resource group name ; rg_list=$OPTARG ;;
        v ) : passed a list of volume groups ; vg_list=$OPTARG ;;
        f ) : passed a list of file systems ; fs_list=$OPTARG ;;
        * ) : anything else
            dspmsg scripts.cat 6555 "Option \"-${OPTARG}\" is not valid\n" "-${OPTARG}"
            return 1
            ;;
    esac
done
shift $((OPTIND - 1))

if [[ -n $* ]]
then
    #
    : If multiple volume groups or file systems are passed in,
    : pick them all up.
    #
    if [[ -n $vg_list ]]
    then
        vg_list="$vg_list $*"
    elif [[ -n $fs_list ]]
    then
        fs_list="$fs_list $*"
    fi
fi

O_flag=""
if [[ -n $(odmget -q "attribute = varyon_state" PdAt) ]]
then
    #
    : LVM may record that a volume group was varied on from an earlier
    : IPL.  Rely on HA state tracking, and override the LVM check
    #
    O_flag='-O'
fi

if [[ -n $all_vgs_flag ]]   # No resource names given explicitly
then
    if [[ -z $LOCALNODENAME ]]
    then
        export LOCALNODENAME=$(get_local_nodename)
    fi
    if [[ -z $LOCALNODENAME ]]
    then
        #
        : If no resources were explicitly passed, we go for all the ones
        : this node deals with.  However, if the local node name is not
        : available, we cannot proceed.
        #
        cl_log 297 "$PROGNAME: ERROR: The environment variable LOCALNODENAME was not set" $PROGNAME
        exit 2
    fi

    #
    : Since no resource names of any type were explicitly passed, go
    : find all the resource groups this node is a member of.
    #
    rg_list=$(clodmget -f group,nodes HACMPgroup | egrep "[: ]${LOCALNODENAME}( |$)" | cut -f1 -d:)
    if [[ -z $rg_list ]]
    then
        #
        : This node does not appear to participate in any resource groups.
        : Strange...  At any rate, there is nothing to do.
        #
        return 0
    fi
fi
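
# Descriptive note on the rg_list derivation above (nothing executed here):
# 'clodmget -f group,nodes HACMPgroup' is expected to emit one line per
# resource group of the form "<group>:<node> <node> ...", so the egrep keeps
# only groups whose node list contains $LOCALNODENAME and the cut keeps the
# group name.  The exact delimiter layout is an assumption inferred from the
# egrep/cut usage above, not from separate documentation.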
if [[ -z $vg_list && -n $rg_list ]]     # if not given volume groups
then
    #
    : Since no volume groups were passed, go find all the volume groups
    : in the given/extracted list of resource groups.
    #
    for group in $rg_list
    do
        #
        : For each resource group that this node participates in, get the
        : list of serial access volume groups in that resource group.
        #
        rg_vg_list=$(clodmget -q "group = $group and name = VOLUME_GROUP" -f value -n HACMPresource)
        if [[ -n $rg_vg_list ]]
        then
            if [[ -n $all_vgs_flag && -n $(odmget -q "group = $group and name like '*REP_RESOURCE'" HACMPresource) ]]
            then
                #
                : If this resource group contains replicated resources,
                : and the volume group was not explicitly specified, skip
                : these volume groups if this is the first node up on the
                : site.
                #
                # This is because volume groups associated with replicated
                # resources can only be brought on line when the replicated
                # resources are in the right state.  This is typically after
                # the predisk_available routine has run.  See process_resources.sh
                #
                this_site_nodes=$(cllssite -c | tail +2 | cut -f2 -d: | grep -w $LOCALNODENAME)
                if [[ $this_site_nodes == $LOCALNODENAME ]]
                then
                    continue    # We are the first, so wait for predisk_available
                else
                    #
                    : This detects the case that the first node on this site came up
                    : sometime before this node was online.  If the owning resource
                    : group $group is on line on some other node on this site, it is
                    : safe to do a passive varyon.
                    #
                    # Since predisk_available has already been run on this site for $group
                    #
                    rg_node_list=$(clodmget -q "group = $group" -f nodes -n HACMPgroup | tr ' ' '\n' | paste -s -d'|' -)
                    this_site_other_rg_nodes=$(print $this_site_nodes | tr ' ' '\n' | grep -v $LOCALNODENAME | egrep -x "$rg_node_list" | paste -s -d'|' -)
                    if [[ -z $this_site_other_rg_nodes || -z $(LC_ALL=C clRGinfo $group 2>/dev/null | egrep -w "$this_site_other_rg_nodes" | grep -w ONLINE | grep -v SECONDARY) ]]
                    then
                        continue    # We are the first, so wait for predisk_available
                    fi
                fi
            fi

            #
            : If there were any serial access volume groups for this node and
            : that resource group, add them to the list.
            #
            vg_list=${vg_list:+$vg_list" "}$rg_vg_list
        fi
    done
fi

if [[ -z $fs_list && -n $rg_list ]]     # if not given file systems
then
    #
    : Since no file systems were passed, go find all the file systems in
    : the given/extracted list of resource groups.
    #
    for group in $rg_list ; do
        #
        : For each resource group that this node participates in, get the
        : list of file systems in that resource group.
        #
        # This is necessary because the user could specify just file
        # systems in the resource group definition, and not volume groups.
        #
        rg_fs_list=$(clodmget -q "group = $group and name = FILESYSTEM" -f value -n HACMPresource)
        if [[ -n $rg_fs_list ]]
        then
            if [[ -n $all_vgs_flag && -n $(odmget -q "group = $group and name like '*REP_RESOURCE'" HACMPresource) ]]
            then
                #
                : If this resource group contains replicated resources,
                : skip the volume groups when invoked without explicitly
                : specifying volume groups.
                #
                # This is because volume groups associated with replicated
                # resources can only be brought on line when the replicated
                # resources are in the right state.  This is typically after
                # the predisk_available routine has run.
                # See process_resources.sh
                #
                continue
            fi

            #
            : If there were any file systems for this node and that resource
            : group, add them to the list
            #
            fs_list=${fs_list:+$fs_list" "}$rg_fs_list
        fi
    done
fi

for filesys in $fs_list                 # if we found any file systems,
do
    if [[ $filesys == ALL ]]            # if ALL was specified
    then
        continue                        # a volume group was given
    fi

    #
    : Get the volume group corresponding to that file system
    #
    vg_list=${vg_list:+$vg_list" "}$(cl_fs2disk -v $filesys)
done

#
: Remove any duplicates from the volume group list
#
vg_list=$(echo $vg_list | tr ' ' '\n' | sort -u)

if [[ -z $vg_list ]]
then
    #
    : This node does not have serial access to VGs for PowerHA, return
    #
    return 0
fi

#
: Find out what volume groups are currently on-line
#
ON_LIST=$(print $(lsvg -L -o 2> /tmp/lsvg.err))

#
: If this node is the first node up in the cluster,
: we want to do a sync for each of the volume groups
: we bring on-line.  If multiple cluster nodes are already active, the
: sync is unnecessary, having already been done once, and possibly disruptive.
#
if [[ -n $PRE_EVENT_MEMBERSHIP ]]
then
    #
    : Other cluster nodes are present - do not sync to avoid
    : interference with any other node using this volume group
    #
    syncflag="-n"
else
    #
    : No other cluster nodes are present, default to sync just to be sure
    : the volume group is in a good state
    #
    syncflag=""
fi

#
: Now, process each volume group in the list of those this node accesses.
#
for vg in $vg_list
do
    typeset PS4_LOOP="$vg"

    #
    : Skip any concurrent GMVGs, they should never be passively varied on.
    #
    if [[ -n $(odmget -q "name=GMVG_REP_RESOURCE AND value=$vg" HACMPresource) ]] ; then
        continue
    fi

    #
    : The VGID is what the LVM low level commands used below use to
    : identify the volume group.
    #
    if ! vgid=$(/usr/sbin/getlvodm -v $vg)
    then
        continue                # cannot process this volume group
    fi

    mode=99                     # until we know more

    #
    : Attempt to determine the mode of the volume group - is it an
    : enhanced concurrent mode volume group or not.
    #
    # Note the use of quoting to pass the hdisk list as a single parameter.
    #
    export mode                 # set by get_vg_mode
    hdisklist=""                # list of disks in the volume group
    /usr/sbin/getlvodm -w $vgid | while read pvid hdisk ; do
        hdisklist=${hdisklist:+$hdisklist" "}${hdisk}
    done
    get_vg_mode "$hdisklist" $vgid $vg $syncflag

    #
    : See if the volume group is already on line.  This should
    : only happen if it were manually brought on line outside of HACMP
    : control, or left on-line after a forced down.
    #
    typeset vg_on_mode=""
    if [[ $ON_LIST == ?(* )$vg?( *) ]]
    then
        lsvg_out=$(LC_ALL=C lsvg -L $vg)
        if [[ $lsvg_out == @(*VG PERMISSION:*read/write*) && $lsvg_out != @(*VG Mode:*Non-Concurrent*) ]]
        then
            vg_on_mode="active"
        else
            vg_on_mode="serial"
        fi
    elif LC_ALL=C lsvg -L $vg 2>/dev/null | grep -q -i -w 'passive-only'
    then
        vg_on_mode="passive"
    fi

    if [[ -n $vg_on_mode ]]     # vary'd on in some fashion
    then
        if (( $mode == 32 ))    # This is an ECM volume group
        then
            if lqueryvg -g $vgid >/dev/null 2>&1
            then
                if [[ $vg_on_mode == "active" ]]
                then
                    #
                    : The volume group is most definitely accessible, because LVM can read
                    : it.  Quit, as it appears in the list of 'on' volume groups.
                    #
                    continue
                elif [[ $vg_on_mode != "passive" ]]
                then
                    #
                    : The volume group is on line, but not in concurrent mode.
                    : This would be the case if the volume group were manually
                    : varied on line, and then HA started.  Try to bring the
                    : volume group offline, so that it can be brought on line
                    : in concurrent mode.
                    #
                    if ! cleanup_vg $vg "$hdisklist"
                    then
                        #
                        : Varyoff failed, most likely because the logical volumes
                        : were not closed.
                        : Not much we can do at this point...
                        #
                        /bin/dspmsg scripts.cat 28 "$PROGNAME: Failed varyoff of $vg\n" $PROGNAME $vg
                        continue
                    fi
                    #
                    : If the node was forced down, varyoff is not done for any passive VG.
                    : Varyon is not required for any passive VG, as varyoff was not performed.
                    #
                elif [[ $vg_on_mode == "passive" ]]
                then
                    continue
                fi
            else
                #
                : We get here when the volume group looks varied on, but cannot
                : be read.  This would be the case where a loss of quorum caused
                : the volume group to be closed.  To try to bring it back on
                : line, it must first be varied off.
                #
                if ! cleanup_vg $vg "$hdisklist"
                then
                    #
                    : Varyoff failed, most likely because the logical volumes
                    : were not closed.  Not much we can do at this point...
                    #
                    /bin/dspmsg scripts.cat 28 "$PROGNAME: Failed varyoff of $vg\n" $PROGNAME $vg
                    continue
                fi
                #
                : Varyoff worked.  The code below will attempt to vary on the
                : volume group again, using force if authorized and appropriate
                #
            fi
        elif (( $mode == 99 ))
        then
            #
            : We cannot tell what kind of volume group $vg is - though
            : LVM does not have it defined as concurrent mode.  Try to
            : vary it off, so that we can vary it on in passive mode.
            #
            if ! cleanup_vg $vg "$hdisklist"    # if varyoff fails
            then
                cl_log 298 "$PROGNAME: Could not vary on volume group $vg in passive mode because it is currently in use" $PROGNAME $vg
                continue                        # go on to the next one
            fi                                  # if varyoff works, will try a
                                                # passive varyon below
            get_vg_mode "$hdisklist" $vgid $vg $syncflag
        fi
    else
        #
        : Volume group is currently not on line in any mode
        #
        if (( 99 == $mode )) && [[ -z $syncflag ]] ; then
            #
            : Check if SCSI PR is enabled and, if so,
            : confirm that the SCSI PR reservations are intact.
            #
            typeset SCSIPR_ENABLED=$(clodmget -n -q "policy=scsi" -f value HACMPsplitmerge)
            if [[ $SCSIPR_ENABLED == Yes ]]
            then
                cl_confirm_scsipr_res $vg
                if (( $? != 0 ))
                then
                    #
                    : Not sure if the reservation exists on the VG, $vg.
                    : It will still be allowed to vary on in passive mode.
                    #
                fi
            fi

            #
            : Could not determine the mode, since no disk was readable.
            : Since this is the first node up in the cluster, no other node
            : should own the VG.  Issue a breakres and check mode again.
            #
            cl_set_vg_fence_height -c $vg rw
            RC=$?
            if (( $ENODEV == $RC ))
            then
                #
                : In the event of a stale fence group, recreate it
                #
                cl_vg_fence_redo -c $vg rw
                RC=$?
            fi
            if (( $RC != 0 ))
            then
                #
                : Log any error, but continue.  If this is a real problem, the varyonvg will fail
                #
                rw=$(dspmsg -s 103 cspoc.cat 350 'read/write' | cut -f2 -d,)
                cl_log 10511 "$PROGNAME: Volume group $vg fence height could not be set to read/write" $PROGNAME $vg $rw
            fi

            cl_disk_available -v -s $hdisklist

            #
            : Just in case LVM needs its state cleaned up
            #
            varyoffvg $vg

            #
            : Try again to find out what kind of volume group this is
            #
            get_vg_mode "$hdisklist" $vgid $vg $syncflag
        fi
    fi

    if (( 32 != $mode && 99 != $mode ))
    then
        #
        : If this is an ordinary VG, try and make it ECM
        #
        if [[ -n $vg_on_mode ]]                 # already vary'd on
        then
            if ! cleanup_vg $vg "$hdisklist"    # if varyoff fails
            then
                cl_log 298 "$PROGNAME: Could not vary on volume group $vg in passive mode because it is currently in use" $PROGNAME $vg
                continue                        # go on to the next one
            fi                                  # if varyoff works, will try to make it ECM below
        fi

        if cl_makecm -C $vg                     # Make it ECM if we can
        then
            get_vg_mode "$hdisklist" $vgid $vg $syncflag    # And check to see if that worked
        fi
    fi
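
    # Descriptive recap of the per-volume-group decision above (comment only):
    # mode 32 means CuAt or the on-disk VGDA identifies an enhanced concurrent
    # mode volume group, and it is passively varied on below; mode 99 means no
    # disk could be read, so the type is still unknown; any other mode is
    # treated as an ordinary volume group, which cl_makecm just attempted to
    # convert to enhanced concurrent mode before re-checking the mode.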
    if (( 32 == $mode ))
    then
        #
        : If this is actually an enhanced concurrent mode volume group,
        : bring it on line in passive mode.  Other kinds are just skipped.
        #
        varyonp $vg "$hdisklist" $syncflag
    fi
done

# This script always returns success.  An event error is not flagged
# even if a passive varyon fails, since a normal varyon will be tried
# later.
return 0
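
# Example invocations, per the Arguments section of the prolog above
# (resource group, volume group and file system names are placeholders):
#
#   cl_pvo                          # all ECM volume groups in this node's resource groups
#   cl_pvo -g rg1                   # volume groups in resource group rg1
#   cl_pvo -v "datavg01 datavg02"   # just the listed volume groups
#   cl_pvo -f "/app /appdata"       # volume groups owning the listed file systems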