#!/bin/ksh93
#  ALTRAN_PROLOG_BEGIN_TAG
#  This is an automatically generated prolog.
#
#  Copyright (C) Altran ACT S.A.S. 2019,2021.  All rights reserved.
#
#  ALTRAN_PROLOG_END_TAG
#
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
# 61haes_r714 src/43haes/usr/sbin/cluster/cspoc/utilities/clupdatevg.sh 1.15.2.2 
#  
# Licensed Materials - Property of IBM 
#  
# COPYRIGHT International Business Machines Corp. 1998,2015 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)  7d4c34b 43haes/usr/sbin/cluster/cspoc/utilities/clupdatevg.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM

###############################################################################
#
# Name:
#       clupdatevg.sh
#
# Description:
#
#	This routine is called to update the ODM information associated with
#	a volume group on the node on which it is run.  It is typically called
#	as part of C-SPOC processing to get the nodes on which an LVM
#	operation was not performed to pick up the updated volume group
#	definition.
#
#	The normal flow of C-SPOC volume group operations is:
#	1. Find the status of the volume group on all nodes in the owning
#	   resource group
#	2. Find a node on which the volume group is vary'd on, or, if it is
#	   not vary'd on, pick a node and vary it on
#	3. Perform the volume group operation on that node
#	4. Remove the reserve from the volume group (varyonvg -b -u)
#	5. Run clupdatevg on all other nodes in the volume group's owning
#	   resource group
#	6. Restore the volume group to its original state (vary'd off, or
#	   vary'd on with reserve)
#
#	All steps above except 3) are typically performed by various routines
#	in lvm_utils.cel
#
# Usage:    clupdatevg <vg> <pvid>
#	    where
#		<vg> is the name of the volume group to be updated.  It must
#			not be vary'd on on this node, nor have a reserve
#			held by any other node
#		<pvid> defines a disk in that volume group which has accurate
#		       volume group configuration information in the VGDA
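#
#	    Example (illustrative volume group name and PVID):
#		clupdatevg sharedvg 00c1234567890abc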
#
# Return Values:
#       0       success
#       1       failure
#
################################################################################

PROGNAME=${0##*/}
PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"
if [[ $VERBOSE_LOGGING == "high" ]]
then
    set -x
    version="7d4c34b 43haes/usr/sbin/cluster/cspoc/utilities/clupdatevg.sh, 726, 2147A_aha726, Feb 05 2021 09:50 PM"
fi
HA_DIR="es"

#
:	Check the number of arguments - there are supposed to be exactly two: the
:	volume group name and the PVID of a disk.
#
(( $# != 2 )) && {
   dspmsg -s 2 cspoc.cat 45 "Usage: clupdatevg <vg> <pvid>\n"
   exit 1
}

#
:	Get the volume group and disk PVID from the command line
#
VG=$1
PVID=$2

#
:	Get the physical volume name for this physical id on this machine
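:	In CuAt, the pvid attribute value is typically the 16 character PVID
:	padded with zeroes to 32 characters, hence the wildcard match below.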
#
PVN=$(clodmget -q "attribute = pvid and value like ${PVID}*" -f name -n CuAt)

if [[ -z $PVN ]] ; then
    #
    :	The given PVID is not known on this node
    #
    dspmsg -s 2 cspoc.cat 40 "Physical volume $PVID is not known on this node\n" $PVID
    exit 1
fi

#
:	If the disk is not in the "available" state, or not readable
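:	lsdev checks the device state recorded in the ODM, while cl_querypv
:	attempts an actual read of the disk, which can fail if another node
:	holds a reserve on it.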
#
if [[ -z $(LC_ALL=C lsdev -Cc disk -l $PVN -S A -F 'status') ]] ||
   ! cl_querypv /dev/$PVN ; then
    #
    :	This may be due to ghost disks getting in the way.  See if we can get
    :	rid of them.
    #	(On the other hand, if the disk has a reserve because the volume
    #	group is vary'd on on another node, that node is not going to like
    #	having its reserve broken by what happens next.)
    #
    #	Pass the -v option to cl_disk_available to tell it not to retain the disk reservation
    #
    cl_disk_available -s -v $PVN $(cl_fs2disk -pg $VG 2>/dev/null)

    #
    :	Now, try that again
    #
    if [[ -z $(LC_ALL=C lsdev -Cc disk -l $PVN -S A -F 'status') ]] ||
       ! cl_querypv /dev/$PVN ; then
        #
        :	Still no go.  No further recovery.
        #
        dspmsg -s 2 cspoc.cat 40 "Physical volume $PVID is not known on this node\n" $PVID
        exit 1
    fi
fi

#
:   If the volume group $VG is already on line on this node - vary'd on in
:   either passive or full read/write mode - there is nothing that needs to be
:   done: LVM has kept the ODM in sync, and an importvg -L is unneeded.
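:   An illustrative 'lsvg' output line for a volume group vary'd on in
:   passive mode would be "VG PERMISSION:      passive-only".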
#
if LC_ALL=C lsvg $VG | grep 'VG PERMISSION:' | egrep -q 'passive|read/write'
then
    exit 0
fi

#
:   Now, import the volume group using the given physical volume
#
# To run 'importvg -L', it is required to set the VG fence height to 'rw'
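# The fence height limits this node's access to the volume group's disks;
# 'rw' permits read/write access, while 'ro' permits read access only.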
#
cl_set_vg_fence_height -c $VG rw
RC=$?
# Check for the standard fence failure return code (19)
if (( $RC == 19 ))
then
    #
    :   In the event of a stale fence group, recreate it
    #
    cl_vg_fence_redo -c $VG rw
    RC=$?
fi
if (( $RC != 0 ))
then
    rw=$(dspmsg -s 103 cspoc.cat 350 'read/write' | cut -f2 -d,)
    dspmsg -s 43 cspoc.cat 50 "$PROGNAME: Volume group $VG fence height could not be set to read/write\n" $PROGNAME $VG "$rw"
    exit 1
fi
if ! importvg -L $VG -R $PVN
then
    #
    :	Load the CL_PVID_ASSIGNMENT setting from the environment file, if
    :	present, allowing for any unexpected leading whitespace.
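    :	A matching line in /etc/environment would look something like:
    :		CL_PVID_ASSIGNMENT=false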
    #
    typeset TAB=$'\t'
    typeset INDENT="$TAB "
    eval $(grep -w "^[$INDENT]*CL_PVID_ASSIGNMENT" /etc/environment)

    #
    :	Recognized values for CL_PVID_ASSIGNMENT are "false", "no",
    :	"disable", or zero.  Any other value preserves the default
    :	behavior, allowing PVIDs to be automatically assigned as
    :	needed.
    #
    if [[ -z $CL_PVID_ASSIGNMENT ||
	$CL_PVID_ASSIGNMENT != @(f|F|n|N|d|D|0)* ]]
    then
	#
	:   One way importvg has been observed to fail is that the volume group
	:   contains a disk without a PVID due to extension/creation on another
	:   node without benefit of C-SPOC.  Correct any such error, and try
	:   again.
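	:   Such a disk shows up in 'lspv' output with "none" in the PVID
	:   column - for example, "hdisk4    none    <vg name>" - which is
	:   what the greps below look for.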
	#
	LC_ALL=C lspv | grep -w $VG | grep -iw "none" | while read diskname rest ; do
	    chdev -l $diskname -a "pv=yes"
	done
	#
	:   Now, try that again
	#
	if ! importvg -L $VG -R $PVN 
	then
	    #
	    :   Failure on the second time is pretty serious
	    #
	    dspmsg -s 2 cspoc.cat 42 "importvg -L %s failed!\n" "$VG -R $PVN"
	    # Done with importvg, revert the fence height to 'ro'            
	    cl_set_vg_fence_height -c $VG ro
            if (( $? != 0 )); then
                ro=$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)
                dspmsg -s 43 cspoc.cat 50 "$PROGNAME: Volume group $VG fence height could not be set to read only\n" $PROGNAME $VG "$ro"
            fi
	    exit 1
	fi
    fi
fi

# Done with importvg, revert the fence height to 'ro'
cl_set_vg_fence_height -c $VG ro
if (( $? != 0 )); then
    ro=$(dspmsg -s 103 cspoc.cat 350 'read only' | cut -f1 -d,)
    dspmsg -s 43 cspoc.cat 50 "$PROGNAME: Volume group $VG fence height could not be set to read only\n" $PROGNAME $VG "$ro"
    exit 1
fi

#
:   One mark of success at this point is that the time stamp recorded in the
:   ODM for this volume group matches the time stamp on the disk
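:   clvgdats reports the time stamp from the VGDA on the disk itself, while
:   'getlvodm -T' reports the copy recorded in the ODM.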
#
vgda_time_stamp=$(clvgdats $PVN)
odm_time_stamp=$(getlvodm -T $VG 2>/dev/null)

#
:   If the ODM holds no timestamp, we appear to be running on a version of LVM
:   that does not maintain timestamps in ODM.  Build one for this volume group.
#
if [[ -z $odm_time_stamp || 0 == $odm_time_stamp ]] ; then
    print "CuAt:" > /tmp/clupdatevg.$$
    print "name = $VG" >> /tmp/clupdatevg.$$
    print "attribute = timestamp" >> /tmp/clupdatevg.$$
    print "value = $vgda_time_stamp" >> /tmp/clupdatevg.$$
    print "type = R" >> /tmp/clupdatevg.$$
    print "generic = DU" >> /tmp/clupdatevg.$$
    print "rep = s" >> /tmp/clupdatevg.$$
    print "nls_index = 0" >> /tmp/clupdatevg.$$
    odmadd /tmp/clupdatevg.$$ && rm -f /tmp/clupdatevg.$$

#
:   If for some reason the timestamp in the ODM does not match the one on the
:   disk - which it should after a successful importvg -L - update it to match
#
elif [[ $vgda_time_stamp != $odm_time_stamp ]] ; then
    putlvodm -T $vgda_time_stamp  $(getlvodm -v $VG)
    #
    :	Having updated the time stamp, call savebase to write the updated
    :	CuAt entries to the boot device.
    #
    /usr/sbin/savebase > /dev/null
fi